LibC: Rework malloc locking, add proper realloc
commit 543bb3cc4b
parent 1c44d24b76
@@ -47,8 +47,8 @@ struct malloc_pool_t
 
    malloc_node_t* free_list;
 
-   uint8_t* end() { return start + size; }
-   bool contains(malloc_node_t* node) { return start <= (uint8_t*)node && (uint8_t*)node < end(); }
+   uint8_t* end() const { return start + size; }
+   bool contains(malloc_node_t* node) const { return start <= (uint8_t*)node && (uint8_t*)node->next() <= end(); }
 };
 
 struct malloc_info_t
@@ -73,7 +73,7 @@ struct malloc_info_t
 static malloc_info_t s_malloc_info;
 static auto& s_malloc_pools = s_malloc_info.pools;
 
-static pthread_spinlock_t s_malloc_lock;
+static pthread_mutex_t s_malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static bool allocate_pool(size_t pool_index)
 {
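The lock swap above has a side benefit beyond sleeping instead of spinning: a `pthread_spinlock_t` has to be set up at runtime with `pthread_spin_init()`, whereas `PTHREAD_MUTEX_INITIALIZER` yields a mutex that is usable from the very first `malloc` call, with no init hook. A minimal sketch of the resulting pattern (standalone example, not the project's code; `critical_section` is a hypothetical placeholder):

```cpp
#include <pthread.h>

// Statically initialized: valid before main() and before any explicit
// initialization code has had a chance to run.
static pthread_mutex_t s_lock = PTHREAD_MUTEX_INITIALIZER;

static void critical_section()
{
    pthread_mutex_lock(&s_lock);
    // ... walk/modify allocator state ...
    pthread_mutex_unlock(&s_lock);
}
```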
@@ -117,6 +117,42 @@ static void remove_node_from_pool_free_list(malloc_pool_t& pool, malloc_node_t*
    }
 }
 
+static void merge_following_free_nodes(malloc_pool_t& pool, malloc_node_t* node)
+{
+   while (!node->last && !node->next()->allocated)
+   {
+       auto* next = node->next();
+       remove_node_from_pool_free_list(pool, next);
+       node->last = next->last;
+       node->size += next->size;
+   }
+}
+
+static void shrink_node_if_needed(malloc_pool_t& pool, malloc_node_t* node, size_t size)
+{
+   assert(size <= node->data_size());
+   if (node->data_size() - size < sizeof(malloc_node_t) + s_malloc_shrink_threshold)
+       return;
+
+   uint8_t* node_end = (uint8_t*)node->next();
+
+   node->size = sizeof(malloc_node_t) + size;
+
+   auto* next = node->next();
+   next->allocated = false;
+   next->size = node_end - (uint8_t*)next;
+   next->last = node->last;
+
+   node->last = false;
+
+   // insert excess node to free list
+   if (pool.free_list)
+       pool.free_list->prev_free = next;
+   next->next_free = pool.free_list;
+   next->prev_free = nullptr;
+   pool.free_list = next;
+}
+
 static void* allocate_from_pool(size_t pool_index, size_t size)
 {
    assert(size % s_malloc_default_align == 0);
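Both new helpers lean on the node header doing double duty: `size` covers header plus payload, so the following node is found by plain address arithmetic and merging two neighbors is a single addition. A reconstructed sketch of the layout they appear to assume — the field uses are taken from the diff, but the actual definitions of `next()` and `data_size()` live elsewhere in the file and may differ:

```cpp
#include <stddef.h>
#include <stdint.h>

// Reconstructed from usage in the diff; not the project's actual definition.
struct malloc_node_t
{
    bool allocated;            // set on allocation, cleared by free()
    bool last;                 // true for the final node in its pool
    size_t size;               // sizeof(malloc_node_t) + payload bytes
    malloc_node_t* prev_free;  // intrusive doubly linked free list
    malloc_node_t* next_free;
    uint8_t data[];            // payload handed out to the caller

    // The neighboring node sits immediately after this one in memory,
    // so merge_following_free_nodes can do node->size += next->size.
    malloc_node_t* next() { return (malloc_node_t*)((uint8_t*)this + size); }
    size_t data_size() { return size - sizeof(malloc_node_t); }
};
```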
@@ -131,43 +167,14 @@ static void* allocate_from_pool(size_t pool_index, size_t size)
    {
        assert(!node->allocated);
 
-       // merge nodes right after current one
-       while (!node->last && !node->next()->allocated)
-       {
-           auto* next = node->next();
-           remove_node_from_pool_free_list(pool, next);
-           node->last = next->last;
-           node->size += next->size;
-       }
+       merge_following_free_nodes(pool, node);
 
        if (node->data_size() < size)
            continue;
 
        node->allocated = true;
        remove_node_from_pool_free_list(pool, node);
 
-       // shrink node if needed
-       if (node->data_size() - size >= sizeof(malloc_node_t) + s_malloc_shrink_threshold)
-       {
-           uint8_t* node_end = (uint8_t*)node->next();
-
-           node->size = sizeof(malloc_node_t) + size;
-
-           auto* next = node->next();
-           next->allocated = false;
-           next->size = node_end - (uint8_t*)next;
-           next->last = node->last;
-
-           node->last = false;
-
-           // insert excess node to free list
-           if (pool.free_list)
-               pool.free_list->prev_free = next;
-           next->next_free = pool.free_list;
-           next->prev_free = nullptr;
-           pool.free_list = next;
-       }
+       shrink_node_if_needed(pool, node, size);
 
        return node->data;
    }
@@ -199,18 +206,19 @@ void* malloc(size_t size)
    size_t first_usable_pool = 0;
    while (s_malloc_pools[first_usable_pool].size - sizeof(malloc_node_t) < size)
        first_usable_pool++;
-   // first_usable_pool = ceil(log(size/s_malloc_smallest_pool, s_malloc_pool_size_mult))
+
+   pthread_mutex_lock(&s_malloc_mutex);
 
    // try to find any already existing pools that we can allocate in
    for (size_t i = first_usable_pool; i < s_malloc_pool_count; i++)
    {
        if (s_malloc_pools[i].start == nullptr)
            continue;
-       pthread_spin_lock(&s_malloc_lock);
        void* ret = allocate_from_pool(i, size);
-       pthread_spin_unlock(&s_malloc_lock);
-       if (ret != nullptr)
-           return ret;
+       if (ret == nullptr)
+           continue;
+       pthread_mutex_unlock(&s_malloc_mutex);
+       return ret;
    }
 
    // allocate new pool
@@ -218,18 +226,17 @@ void* malloc(size_t size)
    {
        if (s_malloc_pools[i].start != nullptr)
            continue;
-       pthread_spin_lock(&s_malloc_lock);
-       void* ret = nullptr;
-       if (allocate_pool(i))
-           ret = allocate_from_pool(i, size);
-       pthread_spin_unlock(&s_malloc_lock);
+       void* ret = allocate_pool(i)
+           ? allocate_from_pool(i, size)
+           : nullptr;
 
        if (ret == nullptr)
            break;
+       pthread_mutex_unlock(&s_malloc_mutex);
        return ret;
    }
 
+   pthread_mutex_unlock(&s_malloc_mutex);
+
    errno = ENOMEM;
    return nullptr;
 }
@@ -245,21 +252,35 @@ void* realloc(void* ptr, size_t size)
    if (size_t ret = size % s_malloc_default_align)
        size += s_malloc_default_align - ret;
 
+   pthread_mutex_lock(&s_malloc_mutex);
+
    auto* node = node_from_data_pointer(ptr);
-   size_t oldsize = node->data_size();
+   auto& pool = pool_from_node(node);
 
-   if (oldsize == size)
+   assert(node->allocated);
+
+   const size_t oldsize = node->data_size();
+
+   // try to grow the node if needed
+   if (size > oldsize)
+       merge_following_free_nodes(pool, node);
+
+   const bool needs_allocation = node->data_size() < size;
+
+   shrink_node_if_needed(pool, node, needs_allocation ? oldsize : size);
+
+   pthread_mutex_unlock(&s_malloc_mutex);
+
+   if (!needs_allocation)
        return ptr;
 
-   // TODO: try to shrink or expand allocation
-
    // allocate new pointer
    void* new_ptr = malloc(size);
    if (new_ptr == nullptr)
        return nullptr;
 
    // move data to the new pointer
-   size_t bytes_to_copy = oldsize < size ? oldsize : size;
+   const size_t bytes_to_copy = (oldsize < size) ? oldsize : size;
    memcpy(new_ptr, ptr, bytes_to_copy);
    free(ptr);
 
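Two details in the new `realloc` are worth spelling out. First, it releases `s_malloc_mutex` before falling back to `malloc` + `memcpy` + `free`; since the mutex is not recursive, calling `malloc` while still holding it would self-deadlock. Second, growing first tries to absorb free nodes that follow the block, so the pointer can stay stable. A usage sketch (whether the grow actually happens in place depends entirely on what is free next to the block at the time):

```cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main()
{
    char* p = (char*)malloc(16);
    if (p == nullptr)
        return 1;
    strcpy(p, "hello");

    // May succeed in place by merging a following free node;
    // otherwise falls back to allocate-copy-free.
    char* q = (char*)realloc(p, 64);
    if (q == nullptr)
        return 1;
    printf("grew in place: %s\n", q == p ? "yes" : "no");

    // Shrinking trims the node and returns the tail to the free list,
    // but only when the excess clears s_malloc_shrink_threshold.
    q = (char*)realloc(q, 8);

    free(q);
    return 0;
}
```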
@@ -273,22 +294,15 @@ void free(void* ptr)
    if (ptr == nullptr)
        return;
 
-   pthread_spin_lock(&s_malloc_lock);
+   pthread_mutex_lock(&s_malloc_mutex);
 
    auto* node = node_from_data_pointer(ptr);
-
-   node->allocated = false;
-
    auto& pool = pool_from_node(node);
 
-   // merge nodes right after freed one
-   while (!node->last && !node->next()->allocated)
-   {
-       auto* next = node->next();
-       remove_node_from_pool_free_list(pool, next);
-       node->last = next->last;
-       node->size += next->size;
-   }
+   assert(node->allocated);
+   node->allocated = false;
+
+   merge_following_free_nodes(pool, node);
 
    // add node to free list
    if (pool.free_list)
@@ -297,22 +311,24 @@ void free(void* ptr)
        node->next_free = pool.free_list;
    pool.free_list = node;
 
-   pthread_spin_unlock(&s_malloc_lock);
+   pthread_mutex_unlock(&s_malloc_mutex);
 }
 
 void* calloc(size_t nmemb, size_t size)
 {
    dprintln_if(DEBUG_MALLOC, "calloc({}, {})", nmemb, size);
 
-   size_t total = nmemb * size;
+   const size_t total = nmemb * size;
    if (size != 0 && total / size != nmemb)
    {
        errno = ENOMEM;
        return nullptr;
    }
 
    void* ptr = malloc(total);
    if (ptr == nullptr)
        return nullptr;
 
    memset(ptr, 0, total);
    return ptr;
 }
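Unchanged by this commit but visible in the hunk: the `total / size != nmemb` guard in `calloc` is the standard division-based overflow check. If `nmemb * size` wrapped around `size_t`, dividing the wrapped product by `size` cannot give back `nmemb`. A worked illustration using 32-bit arithmetic (values chosen for the example):

```cpp
#include <stdint.h>
#include <stdio.h>

int main()
{
    // 0x10000 * 0x10001 = 0x100010000, which wraps to 0x10000 in 32 bits.
    uint32_t nmemb = 0x10000;
    uint32_t size  = 0x10001;
    uint32_t total = nmemb * size; // wrapped product: 0x10000

    // 0x10000 / 0x10001 == 0 != nmemb, so the overflow is detected.
    bool overflow = size != 0 && total / size != nmemb;
    printf("total=%u overflow=%s\n", (unsigned)total, overflow ? "yes" : "no");
    return 0;
}
```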