Diffstat (limited to 'pkmem.h')
-rw-r--r--	pkmem.h	155
1 file changed, 63 insertions, 92 deletions
@@ -6,25 +6,17 @@
 #include <stdint.h>
 #include <stdlib.h>
 
-#ifndef PK_DEFAULT_BUCKET_SIZE
-# define PK_DEFAULT_BUCKET_SIZE (1ULL * 1024ULL * 1024ULL * 256ULL)
+#ifndef PK_MEM_DEFAULT_BUCKET_SIZE
+# define PK_MEM_DEFAULT_BUCKET_SIZE (1ULL * 1024ULL * 1024ULL * 256ULL)
 #endif
-#ifndef PK_MINIMUM_ALIGNMENT
-# define PK_MINIMUM_ALIGNMENT 1
-#endif
-#ifndef PK_MAXIMUM_ALIGNMENT
-# define PK_MAXIMUM_ALIGNMENT 64
-#endif
-
-size_t pk_mem_calculate_bkt_size(size_t sz, size_t reserved_block_count);
-struct pk_membucket* pk_bucket_create(const char* description, int64_t sz, enum PK_MEMBUCKET_FLAGS flags);
-void pk_bucket_destroy(struct pk_membucket* bkt);
-void pk_bucket_reset(struct pk_membucket* bkt);
-void pk_bucket_set_client_bucket(struct pk_membucket *bkt);
-void pk_memory_debug_print(struct pk_membucket *bkt);
-void pk_memory_teardown_all();
-bool pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt);
+size_t pk_mem_bucket_calculate_size(size_t sz, size_t reserved_block_count);
+struct pk_membucket* pk_mem_bucket_create(const char* description, int64_t sz, enum PK_MEMBUCKET_FLAGS flags);
+void pk_mem_bucket_debug_print(struct pk_membucket *bkt);
+void pk_mem_bucket_destroy(struct pk_membucket* bkt);
+void pk_mem_bucket_reset(struct pk_membucket* bkt);
+void pk_mem_bucket_set_client_mem_bucket(struct pk_membucket *bkt);
+bool pk_mem_bucket_ptr_is_in_mem_bucket(const void* ptr, const struct pk_membucket* bkt);
 
 void* pk_new_base(size_t sz, size_t alignment);
 void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt);
@@ -121,6 +113,13 @@ static inline void pkmem_stupid_header_warnings() { (void)stdout; }
 #if defined(PK_MEMORY_DEBUGGER)
 #endif
 
+#ifndef PK_MINIMUM_ALIGNMENT
+# define PK_MINIMUM_ALIGNMENT 1
+#endif
+#ifndef PK_MEMORY_DEBUGGER_MAX_BUCKET_COUNT
+  #define PK_MEMORY_DEBUGGER_MAX_BUCKET_COUNT 16
+#endif
+
 #define EXPECTED_PK_MEMBLOCK_SIZE 128
 
 #define pk_memblock_blocks_idx(bkt, idx) ((bkt->block_capacity-1)-(idx))
@@ -165,17 +164,17 @@ struct pk_membucket {
 	const char *description;
 	// 96
 #ifdef PK_MEMORY_DEBUGGER
-	uint32_t debug_bkt_index;
-	// 100
-	uint32_t debug_head_l;
+	struct pk_memblock *debug_blocks;
 	// 104
-	uint32_t debug_head_r;
+	uint32_t debug_head_l;
 	// 108
-	char padding[4+(8*1)];
-	// 120
-	size_t debug_block_capacity;
+	uint32_t debug_head_r;
+	// 112
+	uint32_t debug_block_capacity;
+	// 116
+	char padding[(8*1)+4];
 #else
-	char padding[8*4];
+	char padding[(8*4)];
 #endif
 	// 128
 	// starting point for alloc'd data
@@ -185,15 +184,8 @@ struct pk_membucket {
 
 static struct pk_membucket *client_bucket = NULL;
 
-#ifdef PK_MEMORY_DEBUGGER
-#include <stdatomic.h>
-static struct pk_memblock *debug_allocs[16];
-static size_t debug_alloc_head = 0;
-static mtx_t debug_mtx;
-#endif
-
 size_t
-pk_mem_calculate_bkt_size(size_t sz, size_t reserved_block_count)
+pk_mem_bucket_calculate_size(size_t sz, size_t reserved_block_count)
 {
 	size_t base_size = EXPECTED_PK_MEMBLOCK_SIZE + sz + (sizeof(struct pk_memblock) * reserved_block_count);
 	// This trick ensures that our array of pk_memblocks at the end is mem-aligned.
@@ -203,50 +195,32 @@ pk_mem_calculate_bkt_size(size_t sz, size_t reserved_block_count)
 }
 
 bool
-pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt)
+pk_mem_bucket_ptr_is_in_mem_bucket(const void* ptr, const struct pk_membucket* bkt)
 {
 	return (ptr >= (void*)bkt && ptr < (void*)pk_bkt_head(bkt));
 }
 
 void
-pk_memory_debug_print(struct pk_membucket *bkt)
+pk_mem_bucket_debug_print(struct pk_membucket *bkt)
 {
 	PK_LOG_INF("pk_membucket details:\n");
 	PK_LOGV_INF("\tbkt: %p\n", (void *)bkt);
 	PK_LOGV_INF("\tdescription: %s\n", bkt->description);
 	PK_LOGV_INF("\tsize: %lu\n", bkt->size);
 	PK_LOGV_INF("\thead: %lu\n", bkt->head);
-	PK_LOGV_INF("\tallocs: %u\n", bkt->alloc_count);
-	PK_LOGV_INF("\tblock head_l: %u\n", bkt->block_head_l);
-	PK_LOGV_INF("\tblock head_r: %u\n", bkt->block_head_r);
-	PK_LOGV_INF("\tflags: %lu\n", bkt->flags);
+	PK_LOGV_INF("\tallocs: %u\n", bkt->alloc_count);
+	PK_LOGV_INF("\tblock head_l: %u\n", bkt->block_head_l);
+	PK_LOGV_INF("\tblock head_r: %u\n", bkt->block_head_r);
+	PK_LOGV_INF("\tflags: %lu\n", bkt->flags);
 #ifdef PK_MEMORY_DEBUGGER
-	PK_LOGV_INF("\tdebug index: %u\n", bkt->debug_bkt_index);
-	PK_LOGV_INF("\tdebug alloc head_l: %u\n", bkt->debug_head_l);
-	PK_LOGV_INF("\tdebug alloc head_r: %u\n", bkt->debug_head_r);
-	PK_LOGV_INF("\tdebug cappacity: %lu\n", bkt->debug_block_capacity);
-#endif
-}
-
-void
-pk_memory_teardown_all()
-{
-	client_bucket = NULL;
-#ifdef PK_MEMORY_DEBUGGER
-	mtx_lock(&debug_mtx);
-	for (size_t i = 0; i < debug_alloc_head; ++i) {
-		if (debug_allocs[i] != NULL) {
-			free(debug_allocs[i]);
-		}
-		debug_allocs[i] = NULL;
-	}
-	debug_alloc_head = 0;
-	mtx_unlock(&debug_mtx);
+	PK_LOGV_INF("\tdebug alloc head_l: %u\n", bkt->debug_head_l);
+	PK_LOGV_INF("\tdebug alloc head_r: %u\n", bkt->debug_head_r);
+	PK_LOGV_INF("\tdebug capacity: %u\n", bkt->debug_block_capacity);
 #endif
 }
 
 struct pk_membucket*
-pk_bucket_create(const char* description, int64_t sz, enum PK_MEMBUCKET_FLAGS flags)
+pk_mem_bucket_create(const char* description, int64_t sz, enum PK_MEMBUCKET_FLAGS flags)
 {
 	// 512 example:
 	// [000-127] pk_membucket
@@ -273,40 +247,27 @@ pk_bucket_create(const char* description, int64_t sz, enum PK_MEMBUCKET_FLAGS fl
 	bkt->blocks[pk_memblock_blocks_idx(bkt,0)].ptr = pk_bkt_data(bkt);
 
 #ifdef PK_MEMORY_DEBUGGER
-	mtx_lock(&debug_mtx);
-	bkt->debug_bkt_index = debug_alloc_head++;
-	mtx_unlock(&debug_mtx);
 	bkt->debug_head_l = 0;
 	bkt->debug_head_r = 0;
 	bkt->debug_block_capacity = 128;
-	debug_allocs[bkt->debug_bkt_index] = (struct pk_memblock*)aligned_alloc(alignof(struct pk_memblock), sizeof(struct pk_memblock) * 128);
+	bkt->debug_blocks = (struct pk_memblock*)aligned_alloc(alignof(struct pk_memblock), sizeof(struct pk_memblock) * 128);
 #endif
 	return bkt;
 }
 
 void
-pk_bucket_destroy(struct pk_membucket* bkt)
+pk_mem_bucket_destroy(struct pk_membucket* bkt)
 {
 	assert(bkt != NULL);
 #ifdef PK_MEMORY_DEBUGGER
-	mtx_lock(&debug_mtx);
-	assert(bkt->debug_bkt_index <= debug_alloc_head);
-	if (debug_allocs[bkt->debug_bkt_index] != NULL) free(debug_allocs[bkt->debug_bkt_index]);
-	debug_allocs[bkt->debug_bkt_index] = NULL;
-	while (debug_alloc_head > 0) {
-		if (debug_allocs[debug_alloc_head-1] != NULL) {
-			break;
-		}
-		debug_alloc_head--;
-	}
-	mtx_unlock(&debug_mtx);
+	if (bkt->debug_blocks != NULL) free(bkt->debug_blocks);
 #endif
 	free(bkt);
 }
 
 void
-pk_bucket_reset(struct pk_membucket* bkt)
+pk_mem_bucket_reset(struct pk_membucket* bkt)
 {
 	if (PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT) == true) {
 		PK_LOG_ERR("WARNING: pk_bucket_reset called on non-transient pk_membucket\n");
@@ -330,7 +291,7 @@ pk_bucket_reset(struct pk_membucket* bkt)
 #endif
 }
 
-void pk_bucket_set_client_bucket(struct pk_membucket *bkt) {
+void pk_mem_bucket_set_client_mem_bucket(struct pk_membucket *bkt) {
 	client_bucket = bkt;
 }
 
@@ -342,8 +303,8 @@ pk_bucket_insert_block(struct pk_membucket* bkt, const struct pk_memblock* block
 	// This means that the block will never go at the END of the list - that would be an append.
 	// It can, however, be placed at the beginning, in which case the entire array shifts.
 
-	struct pk_memblock* new_block;
-	struct pk_memblock* old_block;
+	struct pk_memblock* new_block = NULL;
+	struct pk_memblock* old_block = NULL;
 	size_t i, k;
 
 	// 1. resize if needed
@@ -369,7 +330,7 @@ pk_bucket_insert_block(struct pk_membucket* bkt, const struct pk_memblock* block
 			break;
 		}
 	}
-	if (i == 0) {
+	if (i == 0 && old_block != NULL) {
 		*old_block = *block;
 	} else {
 		*new_block = *block;
@@ -444,23 +405,33 @@ pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt)
 	size_t ii;
 	if (PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT) == false) {
 		for (i = 0; i < bkt->debug_head_r; ++i) {
-			assert((debug_allocs[bkt->debug_bkt_index][i].size == 0 || (void*)(debug_allocs[bkt->debug_bkt_index][i].data) != data) && "mem address alloc'd twice!");
+			assert((bkt->debug_blocks[i].size == 0 || (void*)(bkt->debug_blocks[i].data) != data) && "mem address alloc'd twice!");
 		}
 		i = bkt->debug_head_l;
 		if (bkt->debug_head_l == bkt->debug_head_r) {
 			bkt->debug_head_l++;
 			bkt->debug_head_r++;
+
+			if (bkt->debug_head_r == bkt->debug_block_capacity) {
+				struct pk_memblock *debug_blocks;
+				debug_blocks = (struct pk_memblock*)aligned_alloc(alignof(struct pk_memblock), sizeof(struct pk_memblock) * bkt->debug_block_capacity + 128);
+				assert(debug_blocks != NULL);
+				memcpy(debug_blocks, bkt->debug_blocks, sizeof(struct pk_memblock) * bkt->debug_block_capacity);
+				free(bkt->debug_blocks);
+				bkt->debug_blocks = debug_blocks;
+			}
+
 		} else {
 			for (ii = bkt->debug_head_l+1; ii <= bkt->debug_head_r; ++ii) {
-				if (debug_allocs[bkt->debug_bkt_index][ii].size == 0) {
+				if (bkt->debug_blocks[ii].size == 0) {
 					bkt->debug_head_l = ii;
 					break;
 				}
 			}
 		}
 		assert(bkt->debug_head_l <= bkt->debug_head_r);
-		debug_allocs[bkt->debug_bkt_index][i].data = (char*)data;
-		debug_allocs[bkt->debug_bkt_index][i].size = sz;
+		bkt->debug_blocks[i].data = (char*)data;
+		bkt->debug_blocks[i].size = sz;
 	}
 #endif
 	if (block->data == pk_bkt_head(bkt)) {
@@ -486,7 +457,7 @@ pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt)
 	int64_t debug_tracked_alloc_size = 0;
 	int64_t debug_bucket_alloc_size = pk_bkt_data_sz(bkt);
 	for (i = 0; i < bkt->debug_head_r; ++i) {
-		debug_tracked_alloc_size += debug_allocs[bkt->debug_bkt_index][i].size;
+		debug_tracked_alloc_size += bkt->debug_blocks[i].size;
 	}
 	for (i = 0; i <= bkt->block_head_r; ++i) {
 		k = pk_memblock_blocks_idx(bkt, i);
@@ -527,11 +498,11 @@ pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt)
 	size_t i, k;
 	mtx_lock(&bkt->mtx);
 	assert(bkt->alloc_count > 0);
-	assert(pk_memory_is_in_bucket(ptr, bkt) && "pointer not in memory bucket range");
+	assert(pk_mem_bucket_ptr_is_in_mem_bucket(ptr, bkt) && "pointer not in memory bucket range");
 	assert(sz > 0 && "attempted to free pointer of size 0");
 #ifdef PK_MEMORY_DEBUGGER
 	bool found = PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT);
-	struct pk_memblock *debug_memblocks = debug_allocs[bkt->debug_bkt_index];
+	struct pk_memblock *debug_memblocks = bkt->debug_blocks;
 	struct pk_memblock *mb;
 	if (found == false) {
 		for (i = bkt->debug_head_r+1; i > 0; --i) {
@@ -565,8 +536,8 @@ pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt)
 #ifdef PK_MEMORY_DEBUGGER
 		bkt->debug_head_l = 0;
 		bkt->debug_head_r = 0;
-		debug_allocs[bkt->debug_bkt_index][0].data = NULL;
-		debug_allocs[bkt->debug_bkt_index][0].size = 0;
+		bkt->debug_blocks[0].data = NULL;
+		bkt->debug_blocks[0].size = 0;
 #endif
 		mtx_unlock(&bkt->mtx);
 		return;
@@ -625,7 +596,7 @@ pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt)
 	int64_t debug_tracked_alloc_size = 0;
 	int64_t debug_bucket_alloc_size = pk_bkt_data_sz(bkt);
 	for (i = 0; i < bkt->debug_head_r; ++i) {
-		debug_tracked_alloc_size += debug_allocs[bkt->debug_bkt_index][i].size;
+		debug_tracked_alloc_size += bkt->debug_blocks[i].size;
 	}
 	for (i = 0; i <= bkt->block_head_r; ++i) {
 		k = pk_memblock_blocks_idx(bkt, i);
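For readers skimming the rename, a minimal usage sketch of the new entry points, inferred only from the prototypes in this diff: the bucket description string, the flag value 0 (taken to mean "no flags"), and the 256-byte, 16-byte-aligned allocation are illustrative assumptions, not values from pkmem.h.

#include <assert.h>
#include <stdint.h>
#include "pkmem.h"

int main(void)
{
	/* Size a bucket for ~1 MiB of payload plus 64 reserved block headers (sizes are examples). */
	size_t bkt_sz = pk_mem_bucket_calculate_size(1024u * 1024u, 64);

	/* The description string and the 0 flags value are placeholders, not API-defined constants. */
	struct pk_membucket *bkt = pk_mem_bucket_create("example bucket", (int64_t)bkt_sz, 0);
	assert(bkt != NULL);

	/* Per-bucket allocation: 256 bytes with 16-byte alignment. */
	void *p = pk_new_bkt(256, 16, bkt);
	assert(p != NULL);
	assert(pk_mem_bucket_ptr_is_in_mem_bucket(p, bkt));

	/* The caller passes the original allocation size back on free. */
	pk_delete_bkt(p, 256, bkt);

	/* With PK_MEMORY_DEBUGGER defined, destroy also frees the bucket's own debug_blocks array. */
	pk_mem_bucket_destroy(bkt);
	return 0;
}

Note that pk_memory_teardown_all() is gone in this revision: debug tracking now lives inside each bucket (debug_blocks replaces the global debug_allocs table), so destroying each bucket is sufficient cleanup.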
