#ifndef PK_SINGLE_HEADER_FILE_H
#define PK_SINGLE_HEADER_FILE_H
/******************************************************************************
 * PK Single-Header-Library V0.0.2
 *
 * Author: Jonathan Bradley
 * Copyright: © 2024 Jonathan Bradley
 * Description:
 *   Single-header utility library: common macros, bucket-based memory
 *   management, string views, and a small event system. Define PK_IMPL_ALL
 *   (or the individual PK_IMPL_* macros below) in exactly one translation
 *   unit before including this file to emit the implementations.
 *******************************************************************************
 * pkmacros.h:
 *******************************************************************************
 * pkmem-types.h:
 *******************************************************************************
 * pkmem.h:
 *******************************************************************************
 * pkstr.h:
 *******************************************************************************
 * pkev.h:
 ******************************************************************************/

#define PK_VERSION "0.0.2"

#ifdef PK_IMPL_ALL
# ifndef PK_IMPL_MEM_TYPES
#  define PK_IMPL_MEM_TYPES
# endif
# ifndef PK_IMPL_MEM
#  define PK_IMPL_MEM
# endif
# ifndef PK_IMPL_STR
#  define PK_IMPL_STR
# endif
# ifndef PK_IMPL_EV
#  define PK_IMPL_EV
# endif
#endif

#ifndef PK_MACROS_H
#define PK_MACROS_H

#include <stdio.h> /* fprintf/stderr/stdout for the default log macros */

/* Both configurations use the same arity per macro so call sites compile
 * identically with and without NDEBUG. */
#ifndef PK_LOG_OVERRIDE
# ifdef NDEBUG
#  define PK_LOG_ERR(str)       (void)(str)
#  define PK_LOG_INF(str)       (void)(str)
#  define PK_LOGV_ERR(str, ...) (void)(str)
#  define PK_LOGV_INF(str, ...) (void)(str)
# else
#  define PK_LOG_ERR(str)       fprintf(stderr, str)
#  define PK_LOG_INF(str)       fprintf(stdout, str)
#  define PK_LOGV_ERR(str, ...) fprintf(stderr, str, __VA_ARGS__)
#  define PK_LOGV_INF(str, ...) fprintf(stdout, str, __VA_ARGS__)
# endif
#endif

#define PK_Q(x)     #x
#define PK_QUOTE(x) PK_Q(x)

#define PK_CONCAT2(x, y) x##y
#define PK_CONCAT(x, y)  PK_CONCAT2(x, y)

/* arguments are parenthesized so expressions with lower-precedence operators
 * expand correctly */
#define PK_HAS_FLAG(val, flag)  (((val) & (flag)) == (flag))
#define PK_CLAMP(val, min, max) ((val) < (min) ? (min) : (val) > (max) ? (max) : (val))
#define PK_MIN(a, b) ((a) < (b) ? (a) : (b))
#define PK_MAX(a, b) ((a) > (b) ? (a) : (b))

#define PK_TO_BIN_PAT    PK_Q(%c%c%c%c%c%c%c%c)
#define PK_TO_BIN_PAT_8  PK_TO_BIN_PAT
#define PK_TO_BIN_PAT_16 PK_TO_BIN_PAT PK_TO_BIN_PAT
#define PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_16 PK_TO_BIN_PAT_16
#define PK_TO_BIN_PAT_64 PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_32
#define PK_TO_BIN(byte)          \
    ((byte) & 0x80 ? '1' : '0'), \
    ((byte) & 0x40 ? '1' : '0'), \
    ((byte) & 0x20 ? '1' : '0'), \
    ((byte) & 0x10 ? '1' : '0'), \
    ((byte) & 0x08 ? '1' : '0'), \
    ((byte) & 0x04 ? '1' : '0'), \
    ((byte) & 0x02 ? '1' : '0'), \
    ((byte) & 0x01 ? '1' : '0')
#define PK_TO_BIN_8(u8)   PK_TO_BIN(u8)
#define PK_TO_BIN_16(u16) PK_TO_BIN((u16) >> 8),  PK_TO_BIN((u16) & 0x00FF)
#define PK_TO_BIN_32(u32) PK_TO_BIN_16((u32) >> 16), PK_TO_BIN_16((u32) & 0x0000FFFF)
#define PK_TO_BIN_64(u64) PK_TO_BIN_32((u64) >> 32), PK_TO_BIN_32((u64) & 0x00000000FFFFFFFF)
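/* Illustrative usage sketch for the binary-formatting macros (not part of the
 * API; each PK_TO_BIN_N expands to N comma-separated '0'/'1' chars matching
 * the %c slots in PK_TO_BIN_PAT_N):
 *
 *     uint16_t flags = 0xBEEF;
 *     printf("flags = 0b" PK_TO_BIN_PAT_16 "\n", PK_TO_BIN_16(flags));
 *     // prints: flags = 0b1011111011101111
 */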
#if defined(__cplusplus)
# define CAFE_BABE(T) reinterpret_cast<T *>(0xCAFEBABE)
#else
# define CAFE_BABE(T) (T *)(0xCAFEBABE)
#endif

#define NULL_CHAR_ARR(v, len) char v[len]; v[0] = '\0'; v[len-1] = '\0';

/* designed to follow an `if`, expanding to `if constexpr (...)` */
#define IS_CONSTRUCTIBLE(T) constexpr (std::is_default_constructible<T>::value \
                                       && !std::is_integral<T>::value          \
                                       && !std::is_floating_point<T>::value)
#define IS_DESTRUCTIBLE(T) constexpr (std::is_destructible<T>::value       \
                                      && !std::is_integral<T>::value       \
                                      && !std::is_floating_point<T>::value \
                                      && !std::is_array<T>::value)

#define TypeSafeInt2_H(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \
    using TypeName_T = Type;                                    \
    enum class TypeName : TypeName_T;                           \
    constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max};      \
    constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX}; \
    TypeName operator+(const TypeName& a, const TypeName& b);   \
    TypeName operator-(const TypeName& a, const TypeName& b);   \
    TypeName operator&(const TypeName& a, const TypeName& b);   \
    TypeName operator|(const TypeName& a, const TypeName& b);   \
    TypeName operator^(const TypeName& a, const TypeName& b);   \
    TypeName& operator++(TypeName& a);                          \
    TypeName& operator--(TypeName& a);                          \
    TypeName operator++(TypeName& a, int);                      \
    TypeName operator--(TypeName& a, int);                      \
    TypeName operator<<(const TypeName& a, const TypeName& b);  \
    TypeName operator>>(const TypeName& a, const TypeName& b);  \
    TypeName operator+=(TypeName& a, const TypeName& b);        \
    TypeName operator-=(TypeName& a, const TypeName& b);        \
    TypeName operator&=(TypeName& a, const TypeName& b);        \
    TypeName operator|=(TypeName& a, const TypeName& b);        \
    TypeName operator^=(TypeName& a, const TypeName& b);        \
    TypeName operator~(const TypeName& a);

#define TypeSafeInt2_B(TypeName, TypeName_T)                                        \
    inline TypeName operator+(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) + static_cast<TypeName_T>(b));   \
    }                                                                               \
    inline TypeName operator-(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) - static_cast<TypeName_T>(b));   \
    }                                                                               \
    inline TypeName operator&(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) & static_cast<TypeName_T>(b));   \
    }                                                                               \
    inline TypeName operator|(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) | static_cast<TypeName_T>(b));   \
    }                                                                               \
    inline TypeName operator^(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) ^ static_cast<TypeName_T>(b));   \
    }                                                                               \
    inline TypeName& operator++(TypeName& a) {                                      \
        a = a + TypeName{1};                                                        \
        return a;                                                                   \
    }                                                                               \
    inline TypeName& operator--(TypeName& a) {                                      \
        a = a - TypeName{1};                                                        \
        return a;                                                                   \
    }                                                                               \
    /* postfix operators return the value from before the step */                   \
    inline TypeName operator++(TypeName& a, int) {                                  \
        TypeName old = a;                                                           \
        a = a + TypeName{1};                                                        \
        return old;                                                                 \
    }                                                                               \
    inline TypeName operator--(TypeName& a, int) {                                  \
        TypeName old = a;                                                           \
        a = a - TypeName{1};                                                        \
        return old;                                                                 \
    }                                                                               \
    inline TypeName operator<<(const TypeName& a, const TypeName& b) {              \
        return TypeName(static_cast<TypeName_T>(a) << static_cast<TypeName_T>(b));  \
    }                                                                               \
    inline TypeName operator>>(const TypeName& a, const TypeName& b) {              \
        return TypeName(static_cast<TypeName_T>(a) >> static_cast<TypeName_T>(b));  \
    }                                                                               \
    inline TypeName operator+=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a + b};                                                        \
        return a;                                                                   \
    }                                                                               \
    inline TypeName operator-=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a - b};                                                        \
        return a;                                                                   \
    }                                                                               \
    inline TypeName operator&=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a & b};                                                        \
        return a;                                                                   \
    }                                                                               \
    inline TypeName operator|=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a | b};                                                        \
        return a;                                                                   \
    }                                                                               \
    inline TypeName operator^=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a ^ b};                                                        \
        return a;                                                                   \
    }                                                                               \
    inline TypeName operator~(const TypeName& a) {                                  \
        return static_cast<TypeName>(~static_cast<TypeName_T>(a));                  \
    }

#define TypeSafeInt_H(TypeName, Type, Max) \
    TypeSafeInt2_H(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX))
#define TypeSafeInt_B(TypeName) \
    TypeSafeInt2_B(TypeName, PK_CONCAT(TypeName, _T))

#define TypeSafeInt2_H_constexpr(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \
    using TypeName_T = Type;                                                           \
    enum class TypeName : TypeName_T;                                                  \
    constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max};                             \
    constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX};                        \
    constexpr TypeName operator+(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) + static_cast<TypeName_T>(b));      \
    }                                                                                  \
    constexpr TypeName operator-(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) - static_cast<TypeName_T>(b));      \
    }                                                                                  \
    constexpr TypeName operator&(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) & static_cast<TypeName_T>(b));      \
    }                                                                                  \
    constexpr TypeName operator|(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) | static_cast<TypeName_T>(b));      \
    }                                                                                  \
    constexpr TypeName operator^(const TypeName& a, const TypeName& b) {               \
        return TypeName(static_cast<TypeName_T>(a) ^ static_cast<TypeName_T>(b));      \
    }                                                                                  \
    constexpr TypeName& operator++(TypeName& a) {                                      \
        a = a + TypeName{1};                                                           \
        return a;                                                                      \
    }                                                                                  \
    constexpr TypeName& operator--(TypeName& a) {                                      \
        a = a - TypeName{1};                                                           \
        return a;                                                                      \
    }                                                                                  \
    /* postfix operators return the value from before the step */                      \
    constexpr TypeName operator++(TypeName& a, int) {                                  \
        TypeName old = a;                                                              \
        a = a + TypeName{1};                                                           \
        return old;                                                                    \
    }                                                                                  \
    constexpr TypeName operator--(TypeName& a, int) {                                  \
        TypeName old = a;                                                              \
        a = a - TypeName{1};                                                           \
        return old;                                                                    \
    }                                                                                  \
    constexpr TypeName operator<<(const TypeName& a, const TypeName& b) {              \
        return TypeName(static_cast<TypeName_T>(a) << static_cast<TypeName_T>(b));     \
    }                                                                                  \
    constexpr TypeName operator>>(const TypeName& a, const TypeName& b) {              \
        return TypeName(static_cast<TypeName_T>(a) >> static_cast<TypeName_T>(b));     \
    }                                                                                  \
    constexpr TypeName operator+=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a + b};                                                           \
        return a;                                                                      \
    }                                                                                  \
    constexpr TypeName operator-=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a - b};                                                           \
        return a;                                                                      \
    }                                                                                  \
    constexpr TypeName operator&=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a & b};                                                           \
        return a;                                                                      \
    }                                                                                  \
    constexpr TypeName operator|=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a | b};                                                           \
        return a;                                                                      \
    }                                                                                  \
    constexpr TypeName operator^=(TypeName& a, const TypeName& b) {                    \
        a = TypeName{a ^ b};                                                           \
        return a;                                                                      \
    }                                                                                  \
    constexpr TypeName operator~(const TypeName& a) {                                  \
        return static_cast<TypeName>(~static_cast<TypeName_T>(a));                     \
    }

#define TypeSafeInt_constexpr(TypeName, Type, Max) \
    TypeSafeInt2_H_constexpr(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX))

#endif /* PK_MACROS_H */
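/* Illustrative usage sketch for the type-safe integer macros (C++17 or later;
 * `EntityId` is a hypothetical name):
 *
 *     TypeSafeInt_constexpr(EntityId, uint32_t, 0xFFFFFFFF)
 *     // declares: using EntityId_T = uint32_t; enum class EntityId : EntityId_T;
 *     //           EntityId_MAX / EntityId_T_MAX, and the operators above
 *
 *     EntityId a = EntityId{1};
 *     EntityId b = EntityId{2};
 *     EntityId c = a + b;      // EntityId{3}
 *     // uint32_t n = c;       // error: no implicit conversion back to the raw type
 */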
#ifndef PK_MEM_TYPES_H
#define PK_MEM_TYPES_H

#include <stdint.h>

typedef uint32_t pk_handle_bucket_index_T;
typedef uint32_t pk_handle_item_index_T;

enum PK_HANDLE_VALIDATION : uint8_t {
    PK_HANDLE_VALIDATION_VALID = 0,
    PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH = 1,
    PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH = 2,
    PK_HANDLE_VALIDATION_VALUE_MAX = 3,
};

struct pk_handle {
    pk_handle_bucket_index_T bucketIndex;
    pk_handle_item_index_T itemIndex;
};

#define PK_HANDLE_MAX ((struct pk_handle){ .bucketIndex = 0xFFFFFFFF, .itemIndex = 0xFFFFFFFF })

enum PK_HANDLE_VALIDATION pk_handle_validate(const struct pk_handle handle,
                                             const struct pk_handle bucketHandle,
                                             const uint64_t maxItems);
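/* Illustrative usage sketch (not part of the API). Reading pk_handle_validate,
 * `bucketHandle` appears to be the highest currently valid handle for the
 * bucket and `maxItems` its item capacity:
 *
 *     struct pk_handle h      = { .bucketIndex = 0, .itemIndex = 5 };
 *     struct pk_handle bucket = { .bucketIndex = 0, .itemIndex = 9 };
 *     if (pk_handle_validate(h, bucket, 128) != PK_HANDLE_VALIDATION_VALID) {
 *         PK_LOG_ERR("stale or out-of-range handle\n");
 *     }
 */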
#if defined(__cplusplus)
/* requires C++20 for designated initializers and class-type non-type
 * template parameters */
constexpr pk_handle pk_handle_MAX_constexpr{ .bucketIndex = 0xFFFFFFFF, .itemIndex = 0xFFFFFFFF };

inline constexpr bool operator==(const pk_handle& lhs, const pk_handle& rhs) {
    return lhs.bucketIndex == rhs.bucketIndex && lhs.itemIndex == rhs.itemIndex;
}

template <pk_handle handle, pk_handle bucketHandle, uint64_t maxItems>
inline constexpr enum PK_HANDLE_VALIDATION pk_handle_validate_constexpr() {
    if constexpr (handle == pk_handle_MAX_constexpr)
        return PK_HANDLE_VALIDATION_VALUE_MAX;
    if constexpr (handle.bucketIndex > bucketHandle.bucketIndex)
        return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH;
    if constexpr (handle.itemIndex > maxItems)
        return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
    if constexpr (handle.bucketIndex == bucketHandle.bucketIndex
                  && handle.itemIndex > bucketHandle.itemIndex)
        return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
    return PK_HANDLE_VALIDATION_VALID;
}
#endif /* __cplusplus */

struct pk_membucket;

#endif /* PK_MEM_TYPES_H */

#ifdef PK_IMPL_MEM_TYPES
enum PK_HANDLE_VALIDATION pk_handle_validate(const struct pk_handle handle,
                                             const struct pk_handle bucketHandle,
                                             const uint64_t maxItems) {
    if (handle.bucketIndex == PK_HANDLE_MAX.bucketIndex
        && handle.itemIndex == PK_HANDLE_MAX.itemIndex)
        return PK_HANDLE_VALIDATION_VALUE_MAX;
    if (handle.bucketIndex > bucketHandle.bucketIndex)
        return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH;
    if (handle.itemIndex > maxItems)
        return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
    if (handle.bucketIndex == bucketHandle.bucketIndex
        && handle.itemIndex > bucketHandle.itemIndex)
        return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
    return PK_HANDLE_VALIDATION_VALID;
}
#endif /* PK_IMPL_MEM_TYPES */

#ifndef PK_MEM_H
#define PK_MEM_H

#include <stdbool.h>
#include <stddef.h>

#ifndef PK_DEFAULT_BUCKET_SIZE
# define PK_DEFAULT_BUCKET_SIZE (1ULL * 1024ULL * 1024ULL * 256ULL) /* 256 MiB */
#endif
#ifndef PK_MINIMUM_ALIGNMENT
# define PK_MINIMUM_ALIGNMENT 1
#endif
#ifndef PK_MAXIMUM_ALIGNMENT
# define PK_MAXIMUM_ALIGNMENT 64
#endif

struct pk_membucket* pk_bucket_create(const char* description, int64_t sz, bool transient);
void pk_bucket_destroy(struct pk_membucket* bkt);
void pk_bucket_reset(struct pk_membucket* bkt);
void pk_memory_debug_print(void);
void pk_memory_flush(void);
void pk_memory_teardown_all(void);
bool pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt);
void* pk_new_base(size_t sz, size_t alignment);
void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt);
void pk_delete_base(const void* ptr, size_t sz);
void pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt);

#if defined(__cplusplus)
#include <new>         /* placement new */
#include <type_traits>

static inline void stupid_header_warnings_cpp() { (void)std::is_const<int>::value; }

template <typename T>
inline T* pk_new(pk_membucket* bucket = nullptr) {
    void* ptr = nullptr;
    if (bucket) {
        ptr = pk_new_bkt(sizeof(T), alignof(T), bucket);
    } else {
        ptr = pk_new_base(sizeof(T), alignof(T));
    }
    if IS_CONSTRUCTIBLE(T) {
        return new (ptr) T{};
    }
    return reinterpret_cast<T*>(ptr);
}

template <typename T>
inline T* pk_new(long count, pk_membucket* bucket = nullptr) {
    char* ptr = nullptr;
    if (bucket) {
        ptr = static_cast<char*>(pk_new_bkt(sizeof(T) * count, alignof(T), bucket));
    } else {
        ptr = static_cast<char*>(pk_new_base(sizeof(T) * count, alignof(T)));
    }
    if IS_CONSTRUCTIBLE(T) {
        for (long i = 0; i < count; ++i) {
            new (ptr + (i * sizeof(T))) T{};
        }
    }
    return reinterpret_cast<T*>(ptr);
}

template <typename T>
inline void pk_delete(const T* ptr, pk_membucket* bucket = nullptr) {
    if IS_DESTRUCTIBLE(T) {
        const_cast<T*>(ptr)->~T(); /* const_cast: reinterpret_cast cannot drop const */
    }
    if (bucket) {
        return pk_delete_bkt(static_cast<const void*>(ptr), sizeof(T), bucket);
    } else {
        return pk_delete_base(static_cast<const void*>(ptr), sizeof(T));
    }
}

template <typename T>
inline void pk_delete(const T* ptr, long count, pk_membucket* bucket = nullptr) {
    if IS_DESTRUCTIBLE(T) {
        T* p = const_cast<T*>(ptr);
        for (long i = 0; i < count; ++i) {
            p[i].~T();
        }
    }
    if (bucket) {
        return pk_delete_bkt(static_cast<const void*>(ptr), sizeof(T) * count, bucket);
    } else {
        return pk_delete_base(static_cast<const void*>(ptr), sizeof(T) * count);
    }
}
#endif /* __cplusplus */

#endif /* PK_MEM_H */
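/* Illustrative usage sketch for the allocation API (not part of the library;
 * "frame arena" and `Foo` are hypothetical):
 *
 *     // C: explicit bucket
 *     struct pk_membucket* bkt = pk_bucket_create("frame arena", 1 << 20, true);
 *     int* xs = (int*)pk_new_bkt(sizeof(int) * 64, alignof(int), bkt);
 *     pk_delete_bkt(xs, sizeof(int) * 64, bkt);
 *     pk_bucket_destroy(bkt);
 *
 *     // C++: typed helpers with automatic bucket selection
 *     Foo* f = pk_new<Foo>();   // default-constructs when IS_CONSTRUCTIBLE
 *     pk_delete(f);             // destroys (when IS_DESTRUCTIBLE) and frees
 */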
#ifdef PK_IMPL_MEM

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>

static inline void pkmem_stupid_header_warnings() { (void)stdout; }

#if defined(PK_MEMORY_DEBUGGER)
/*
 * Note that certain aspects of this expect that you only have one non-transient bucket.
 * If you need to track multiple non-transient buckets, these sections will need a refactor.
 */
#endif

struct pk_memblock {
    char* data;
    size_t size;
};

struct pk_membucket {
    // the total size of the bucket, `blocks+ptr`
    int64_t size;
    // the current head of the bucket: byte offset from `ptr`.
    // All currently alloc'd data is before this offset
    int64_t head;
    // amount of lost bytes in this membucket, hopefully zero
    int64_t lostBytes;
    // the number of active allocations from this bucket
    int64_t allocs;
    // the index of the last empty block.
    // Should always point to `pk_memblock{ .data = ptr+head, .size = size-head }`
    int64_t lastEmptyBlockIndex;
    // number of pk_memblocks in the `*blocks` array
    int64_t maxBlockCount;
    // ptr to an array of pk_memblock to track ALL free space between ptr and ptr+sz
    struct pk_memblock* blocks;
    // starting point for alloc'd data
    union {
        char* ptr;
        void* raw;
    };
    const char* description;
    mtx_t mtx;
    bool transient;
};

static struct pk_membucket pk_buckets[8];
static int64_t pk_bucket_head = 0;

#ifdef PK_MEMORY_DEBUGGER
struct pk_dbg_memblock {
    struct pk_memblock blk;
    struct pk_membucket *bkt;
};
static struct pk_dbg_memblock debug_all_allocs[1024 * 1024];
static int64_t debug_alloc_head = 0;
static bool has_init_debug = false;
#endif

bool pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt) {
    if (ptr >= bkt->raw && (const char*)ptr < bkt->ptr + bkt->size)
        return true;
    return false;
}

void pk_memory_debug_print(void) {
    PK_LOGV_INF("Memory Manager printout:\nBucket count: %li\n", pk_bucket_head);
    for (long i = 0; i < pk_bucket_head; ++i) {
        PK_LOGV_INF("- bucket #%li\n", i);
        PK_LOGV_INF("\tdescription: %s\n", pk_buckets[i].description);
        PK_LOGV_INF("\tsize: %li\n", pk_buckets[i].size);
        PK_LOGV_INF("\thead: %li\n", pk_buckets[i].head);
        PK_LOGV_INF("\tlostBytes: %li\n", pk_buckets[i].lostBytes);
        PK_LOGV_INF("\tallocs: %li\n", pk_buckets[i].allocs);
        PK_LOGV_INF("\tlastEmptyBlockIndex: %li\n", pk_buckets[i].lastEmptyBlockIndex);
        PK_LOGV_INF("\tmaxBlockCount: %li\n", pk_buckets[i].maxBlockCount);
        PK_LOGV_INF("\tblocks: %p\n", (void*)pk_buckets[i].blocks);
        PK_LOGV_INF("\tptr: %p\n", (void*)pk_buckets[i].ptr);
        PK_LOGV_INF("\ttransient: %i\n", pk_buckets[i].transient);
#ifdef PK_MEMORY_DEBUGGER
        uint64_t count = 0;
        for (int64_t d = 0; d < debug_alloc_head; ++d) {
            /* compare against bucket `i`, the bucket being printed */
            if (debug_all_allocs[d].bkt == &pk_buckets[i] && debug_all_allocs[d].blk.size > 0) {
                count += 1;
            }
        }
        PK_LOGV_INF("\tdebug alloc count: %lu\n", count);
        PK_LOGV_INF("\tdebug alloc last: %li\n", debug_alloc_head);
#endif
    }
}

void pk_memory_flush(void) {
    for (long i = pk_bucket_head - 2; i > -1; --i) {
        if (pk_buckets[i].head != 0)
            break;
        if (pk_buckets[i + 1].head != 0)
            break;
        if (pk_buckets[i].transient == true)
            break;
        if (pk_buckets[i + 1].transient == true)
            break;
        pk_bucket_head--;
        pk_bucket_destroy(&pk_buckets[i + 1]);
    }
}

void pk_memory_teardown_all(void) {
    for (int64_t i = pk_bucket_head - 1; i >= 0; --i) { /* bucket 0 included */
        if (pk_buckets[i].ptr == nullptr)
            continue;
        pk_bucket_destroy(&pk_buckets[i]);
    }
    pk_bucket_head = 0;
}

static int64_t pk_bucket_create_inner(int64_t sz, bool transient, const char* description) {
#ifdef PK_MEMORY_DEBUGGER
    if (has_init_debug == false) {
        has_init_debug = true;
        memset(debug_all_allocs, 0, sizeof(struct pk_dbg_memblock) * 1024 * 1024);
    }
#endif
    int64_t blockCount = (int64_t)(sz * 0.01);
    struct pk_membucket* bkt = &pk_buckets[pk_bucket_head];
    bkt->size = sz;
    bkt->head = 0;
    bkt->lostBytes = 0;
    bkt->allocs = 0;
    bkt->lastEmptyBlockIndex = 0;
    bkt->maxBlockCount = blockCount < 10 ? 10 : blockCount;
    bkt->blocks = (struct pk_memblock*)malloc(sz);
    assert(bkt->blocks != nullptr && "failed to allocate memory");
#if 1
    memset(bkt->blocks, 0, sz);
#endif
    bkt->ptr = ((char*)(bkt->blocks)) + (sizeof(struct pk_memblock) * bkt->maxBlockCount);
    size_t misalignment = (uint64_t)(bkt->ptr) % PK_MAXIMUM_ALIGNMENT;
    if (misalignment != 0) {
        size_t moreBlocks = misalignment / sizeof(struct pk_memblock);
        bkt->maxBlockCount += moreBlocks;
        bkt->ptr += (PK_MAXIMUM_ALIGNMENT - misalignment);
    }
    bkt->description = description;
    bkt->transient = transient;
    /* the mutex must be initialized before pk_new_bkt/pk_delete_bkt lock it */
    mtx_init(&bkt->mtx, mtx_plain);
    struct pk_memblock* memBlock = (struct pk_memblock*)(bkt->blocks);
    memBlock->data = bkt->ptr;
    memBlock->size = sz - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
    return pk_bucket_head++;
}

struct pk_membucket* pk_bucket_create(const char* description, int64_t sz, bool transient) {
    return &pk_buckets[pk_bucket_create_inner(sz, transient, description)];
}

void pk_bucket_destroy(struct pk_membucket* bkt) {
    int64_t i;
    for (i = 0; i < pk_bucket_head; ++i) {
        if (&pk_buckets[i] == bkt) {
            if (i == pk_bucket_head - 1) /* only the last bucket shrinks the head */
                pk_bucket_head--;
            break;
        }
    }
    free(bkt->blocks);
    mtx_destroy(&bkt->mtx);
    bkt->size = 0;
    bkt->head = 0;
    bkt->lostBytes = 0;
    bkt->allocs = 0;
    bkt->lastEmptyBlockIndex = -1;
    bkt->maxBlockCount = 0;
    bkt->blocks = CAFE_BABE(struct pk_memblock);
    bkt->ptr = CAFE_BABE(char);
    bkt->transient = false;
#ifdef PK_MEMORY_DEBUGGER
    for (i = debug_alloc_head; i > -1; --i) {
        if (debug_all_allocs[i].bkt == bkt) {
            debug_all_allocs[i].blk.data = NULL;
            debug_all_allocs[i].blk.size = 0u;
            debug_all_allocs[i].bkt = NULL; /* release the slot, as pk_delete_bkt does */
        }
    }
#endif
}

void pk_bucket_reset(struct pk_membucket* bkt) {
    if (bkt->transient != true) {
        PK_LOG_ERR("WARNING: pk_bucket_reset called on non-transient pk_membucket\n");
    }
    bkt->head = 0;
    bkt->lostBytes = 0;
    bkt->allocs = 0;
    bkt->lastEmptyBlockIndex = 0;
    bkt->blocks->data = bkt->ptr;
    bkt->blocks->size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
#ifdef PK_MEMORY_DEBUGGER
    for (int64_t i = debug_alloc_head; i > -1; --i) {
        if (debug_all_allocs[i].bkt == bkt) {
            debug_all_allocs[i].blk.data = NULL;
            debug_all_allocs[i].blk.size = 0u;
            debug_all_allocs[i].bkt = NULL; /* release the slot, as pk_delete_bkt does */
        }
    }
#endif
}
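/* Illustrative per-frame arena sketch (not part of the library): a transient
 * bucket is reclaimed wholesale with pk_bucket_reset instead of per-allocation
 * pk_delete_bkt calls:
 *
 *     struct pk_membucket* frame = pk_bucket_create("frame", 1 << 20, true);
 *     for (;;) {
 *         void* scratch = pk_new_bkt(4096, 16, frame);
 *         // ... scratch is valid for this frame only ...
 *         pk_bucket_reset(frame);  // frees everything at once
 *     }
 */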
/* internal helper: inserts `block` into the address-sorted free-block array */
static void pk_bucket_insert_block(struct pk_membucket* bkt, const struct pk_memblock* block) {
    int64_t index = bkt->lastEmptyBlockIndex;
    while (index >= 0) {
        struct pk_memblock* b = &bkt->blocks[index];
        struct pk_memblock* nb = &bkt->blocks[index + 1];
        if (b->data < block->data) {
            break;
        }
        nb->data = b->data;
        nb->size = b->size;
        index -= 1;
    }
    struct pk_memblock* b = &bkt->blocks[index + 1];
    b->data = block->data;
    b->size = block->size;
    bkt->lastEmptyBlockIndex += 1;
}

/* internal helper: removes zero-sized entries from the free-block array */
static void pk_bucket_collapse_empty_blocks(struct pk_membucket* bkt) {
    for (int64_t i = bkt->lastEmptyBlockIndex; i > -1; --i) {
        struct pk_memblock* block = &bkt->blocks[i];
        if (block->size == 0 && i == bkt->lastEmptyBlockIndex) {
            block->data = nullptr;
            bkt->lastEmptyBlockIndex -= 1;
            continue;
        }
        if (block->size > 0) {
            continue;
        }
        for (int64_t k = i; k < bkt->lastEmptyBlockIndex; ++k) {
            bkt->blocks[k].data = bkt->blocks[k + 1].data;
            bkt->blocks[k].size = bkt->blocks[k + 1].size;
        }
        bkt->lastEmptyBlockIndex -= 1;
    }
}
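/* Illustrative free-list trace (hypothetical offsets, relative to bkt->ptr):
 * freeing an allocation A that sits between two free blocks merges all three,
 * per the beforeBlk/afterBlk handling in pk_delete_bkt below:
 *
 *     blocks: [0..16) free | [16..32) A in use | [32..64) free
 *     pk_delete_bkt(A, 16, bkt);   // beforeBlk and afterBlk both found
 *     blocks: [0..64) free         // afterBlk zeroed, then collapsed away
 */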
void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt) {
#ifdef PK_MEMORY_FORCE_MALLOC
    return malloc(sz);
#endif
    if (sz == 0)
        return nullptr;
    size_t calculatedAlignment = alignment < PK_MINIMUM_ALIGNMENT ? PK_MINIMUM_ALIGNMENT : alignment;
    size_t misalignment = 0;
    struct pk_memblock* prevBlock = nullptr;
    struct pk_memblock* block = nullptr;
    struct pk_memblock* nextBlock = nullptr;
    void* data = nullptr;
    mtx_lock(&bkt->mtx);
    for (int64_t i = 0; i <= bkt->lastEmptyBlockIndex; ++i) {
        struct pk_memblock* blk = &bkt->blocks[i];
        misalignment = (size_t)(blk->data) % calculatedAlignment;
        misalignment = (calculatedAlignment - misalignment) % calculatedAlignment;
        if (blk->size >= sz + misalignment) {
            block = blk;
            if (i < bkt->lastEmptyBlockIndex && bkt->blocks[i + 1].data == block->data + block->size) {
                nextBlock = &bkt->blocks[i + 1];
            }
            if (i > 0 && i != bkt->lastEmptyBlockIndex
                && (bkt->blocks[i - 1].data + bkt->blocks[i - 1].size) == block->data) {
                prevBlock = &bkt->blocks[i - 1];
            }
            break;
        }
    }
    assert(block != nullptr && "memory corruption: failed to find bucket with enough space");
    data = block->data + misalignment;
#ifdef PK_MEMORY_DEBUGGER
    bool handled = bkt->transient;
    if (handled == false) {
        for (int64_t i = 0; i < debug_alloc_head; ++i) {
            struct pk_dbg_memblock* mb = &debug_all_allocs[i];
            if (mb->bkt != NULL)
                continue;
            assert((mb->blk.size == 0 || (void*)(mb->blk.data) != data) && "mem address alloc'd twice!");
            if (mb->blk.size == 0) {
                mb->blk.data = (char*)(data);
                mb->blk.size = sz;
                mb->bkt = bkt;
                handled = true;
                break;
            }
        }
    }
    if (handled == false) {
        debug_all_allocs[debug_alloc_head++] = (struct pk_dbg_memblock){
            .blk = (struct pk_memblock){
                .data = (char*)(data),
                .size = sz,
            },
            .bkt = bkt,
        };
    }
#endif
    int64_t afterSize = block->size - (misalignment + sz);
    if (block->data == bkt->ptr + bkt->head) {
        bkt->head += (sz + misalignment);
    }
    if (afterSize > 0 && nextBlock == nullptr) {
        struct pk_memblock newBlock;
        memset(&newBlock, 0, sizeof(struct pk_memblock));
        newBlock.data = block->data + misalignment + sz;
        newBlock.size = afterSize;
        pk_bucket_insert_block(bkt, &newBlock);
    }
    if (prevBlock == nullptr && nextBlock == nullptr) {
        block->size = misalignment;
    } else if (nextBlock != nullptr) {
        block->size = misalignment;
        nextBlock->data -= afterSize;
        nextBlock->size += afterSize;
    } else if (prevBlock != nullptr) {
        prevBlock->size += misalignment;
        block->data += misalignment + sz;
        block->size = 0;
        // if you make it here, afterSize has already been handled
    }
    bkt->allocs++;
    assert(data >= bkt->raw && "allocated data is before bucket data");
    assert((char*)data <= bkt->ptr + bkt->size && "allocated data is after bucket data");
    pk_bucket_collapse_empty_blocks(bkt);
#ifdef PK_MEMORY_DEBUGGER
    if (!bkt->transient) {
        int64_t debug_tracked_alloc_size = 0;
        int64_t debug_bucket_alloc_size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
        for (int64_t i = 0; i < debug_alloc_head; ++i) {
            if (debug_all_allocs[i].bkt != bkt)
                continue;
            debug_tracked_alloc_size += debug_all_allocs[i].blk.size;
        }
        for (int64_t i = 0; i <= bkt->lastEmptyBlockIndex; ++i) {
            debug_bucket_alloc_size -= bkt->blocks[i].size;
        }
        assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!");
    }
#endif
    mtx_unlock(&bkt->mtx);
    return data;
}
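/* Illustrative alignment sketch (not part of the library): pk_new_bkt rounds
 * the block start up to the requested alignment and keeps the padding tracked
 * as free-block space, so the returned pointer satisfies the alignment:
 *
 *     void* p = pk_new_bkt(128, 64, bkt);
 *     assert(((uintptr_t)p % 64) == 0);
 */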
void* pk_new_base(size_t sz, size_t alignment) {
    struct pk_membucket* bkt = nullptr;
    for (long i = 0; i < pk_bucket_head; ++i) {
        if (pk_buckets[i].transient == false
            && pk_buckets[i].size - pk_buckets[i].head > sz + PK_MAXIMUM_ALIGNMENT) {
            bkt = &pk_buckets[i];
            break;
        }
    }
    if (bkt == nullptr) {
        bkt = &pk_buckets[pk_bucket_create_inner(PK_DEFAULT_BUCKET_SIZE, false,
                                                 "pk_bucket internally created")];
    }
    return pk_new_bkt(sz, alignment, bkt);
}

void pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt) {
#ifdef PK_MEMORY_FORCE_MALLOC
    free((void*)ptr); /* cast away const for free(); this path is plain C */
    return;
#endif
    mtx_lock(&bkt->mtx);
    assert(ptr >= bkt->raw && (char*)ptr < bkt->ptr + bkt->size && "pointer not in memory bucket range");
    assert(sz > 0 && "attempted to free pointer of size 0");
#ifdef PK_MEMORY_DEBUGGER
    bool found = bkt->transient;
    if (found == false) {
        for (int64_t i = debug_alloc_head - 1; i > -1; --i) {
            struct pk_dbg_memblock* mb = &debug_all_allocs[i];
            if (mb->bkt != bkt)
                continue;
            if (mb->blk.size == 0)
                continue;
            if ((void*)(mb->blk.data) == ptr) {
                assert(mb->blk.size == sz && "[PK_MEMORY_HPP] incorrect free size");
                mb->blk.size = 0;
                mb->bkt = NULL;
                found = true;
                if (i == (debug_alloc_head - 1)) {
                    debug_alloc_head--;
                }
                break;
            }
        }
    }
    assert(found && "[PK_MEMORY_HPP] double free or invalid ptr");
#endif
    bkt->allocs--;
    if (bkt->allocs == 0) {
        bkt->head = 0;
        bkt->lastEmptyBlockIndex = 0;
        bkt->blocks[0].data = bkt->ptr;
        bkt->blocks[0].size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
        mtx_unlock(&bkt->mtx); /* release the lock on this early-return path too */
        return;
    }
    char* afterPtr = ((char*)(ptr)) + sz;
    struct pk_memblock* beforeBlk = nullptr;
    struct pk_memblock* afterBlk = nullptr;
    for (int64_t i = bkt->lastEmptyBlockIndex; i > 0; --i) {
        if (bkt->blocks[i - 1].data + bkt->blocks[i - 1].size == ptr) {
            beforeBlk = &bkt->blocks[i - 1];
        }
        if (bkt->blocks[i].data == afterPtr) {
            afterBlk = &bkt->blocks[i];
            break;
        }
        if (bkt->blocks[i - 1].data < (char*)ptr) {
            break;
        }
    }
    if (ptr == bkt->ptr && afterBlk == nullptr && bkt->blocks[0].data == afterPtr) {
        afterBlk = &bkt->blocks[0];
    }
    if (afterBlk != nullptr && afterBlk->data == bkt->ptr + bkt->head) {
        bkt->head -= sz;
        if (beforeBlk != nullptr) {
            bkt->head -= beforeBlk->size;
        }
    }
    if (beforeBlk == nullptr && afterBlk == nullptr) {
        struct pk_memblock newBlock;
        memset(&newBlock, 0, sizeof(struct pk_memblock));
        newBlock.data = (char*)ptr;
        newBlock.size = sz;
        pk_bucket_insert_block(bkt, &newBlock);
    } else if (beforeBlk != nullptr && afterBlk != nullptr) {
        beforeBlk->size += sz + afterBlk->size;
        afterBlk->size = 0;
    } else if (beforeBlk != nullptr) {
        beforeBlk->size += sz;
    } else if (afterBlk != nullptr) {
        afterBlk->data -= sz;
        afterBlk->size += sz;
    }
    pk_bucket_collapse_empty_blocks(bkt);
#ifdef PK_MEMORY_DEBUGGER
    if (!bkt->transient) {
        int64_t debug_tracked_alloc_size = 0;
        int64_t debug_bucket_alloc_size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
        for (int64_t i = 0; i < debug_alloc_head; ++i) {
            if (debug_all_allocs[i].bkt != bkt)
                continue;
            debug_tracked_alloc_size += debug_all_allocs[i].blk.size;
        }
        for (int64_t i = 0; i <= bkt->lastEmptyBlockIndex; ++i) {
            debug_bucket_alloc_size -= bkt->blocks[i].size;
        }
        assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!");
    }
#endif
    mtx_unlock(&bkt->mtx);
}

void pk_delete_base(const void* ptr, size_t sz) {
    struct pk_membucket* bkt = nullptr;
    for (long i = 0; i < pk_bucket_head; ++i) {
        if (pk_memory_is_in_bucket(ptr, &pk_buckets[i])) {
            bkt = &pk_buckets[i];
            break;
        }
    }
    assert(bkt != nullptr && "failed to determine correct memory bucket");
    pk_delete_bkt(ptr, sz, bkt);
}

#endif /* PK_IMPL_MEM */
#ifndef PK_STR_H
#define PK_STR_H

#include <stdint.h>

struct pk_str {
    char *val;
    uint32_t length;
    uint32_t reserved;
};

struct pk_cstr {
    const char *val;
    uint32_t length;
    uint32_t reserved;
};

struct pk_str cstring_to_pk_str(char *s);
struct pk_cstr cstring_to_pk_cstr(const char *s);
struct pk_str pk_cstr_to_pk_str(const struct pk_cstr *s);
struct pk_cstr pk_str_to_pk_cstr(const struct pk_str *s);
int pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs);
int pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs);

#endif /* PK_STR_H */

#ifdef PK_IMPL_STR

#include <string.h>

struct pk_str cstring_to_pk_str(char *s) {
    return (struct pk_str){
        .val = s,
        .length = (uint32_t)(strlen(s)),
        .reserved = 0,
    };
}

struct pk_cstr cstring_to_pk_cstr(const char *s) {
    return (struct pk_cstr){
        .val = s,
        .length = (uint32_t)(strlen(s)),
        .reserved = 0,
    };
}

struct pk_str pk_cstr_to_pk_str(const struct pk_cstr *s) {
    return (struct pk_str){
        .val = (char *)(s->val),
        .length = s->length,
        .reserved = s->reserved,
    };
}

struct pk_cstr pk_str_to_pk_cstr(const struct pk_str *s) {
    return (struct pk_cstr){
        .val = (char *)(s->val),
        .length = s->length,
        .reserved = s->reserved,
    };
}

/* compares the common prefix, then breaks ties by length so that a string is
 * ordered before its own proper extensions (e.g. "abc" < "abcd") */
int pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs) {
    int r = strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length));
    if (r != 0)
        return r;
    return (lhs->length > rhs->length) - (lhs->length < rhs->length);
}

int pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs) {
    int r = strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length));
    if (r != 0)
        return r;
    return (lhs->length > rhs->length) - (lhs->length < rhs->length);
}

#endif /* PK_IMPL_STR */
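/* Illustrative usage sketch for the string views (not part of the library):
 *
 *     struct pk_cstr a = cstring_to_pk_cstr("hello");
 *     struct pk_cstr b = cstring_to_pk_cstr("help");
 *     if (pk_compare_cstr(&a, &b) < 0) {
 *         // taken: "hello" sorts before "help"
 *     }
 */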
#ifndef PK_EV_H
#define PK_EV_H

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pk_ev_mgr_id_T;
typedef uint64_t pk_ev_id_T;

// note: pk_ev_init() is NOT thread-safe
void pk_ev_init(void);
// note: pk_ev_teardown() is NOT thread-safe
void pk_ev_teardown(void);

pk_ev_mgr_id_T pk_ev_create_mgr(void);
void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr);

/* invoked by pk_ev_emit as (user_ev_data, user_cb_data, user_emit_data) */
typedef void (pk_ev_cb_fn)(void *user_ev_data, void *user_cb_data, void *user_emit_data);

pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data);
bool pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data);
void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data);

#endif /* PK_EV_H */

#ifdef PK_IMPL_EV

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>

#ifndef PK_EV_INIT_MGR_COUNT
# define PK_EV_INIT_MGR_COUNT 1
#endif
#ifndef PK_EV_INIT_EV_COUNT
# define PK_EV_INIT_EV_COUNT 16
#endif
#ifndef PK_EV_INIT_CB_COUNT
# define PK_EV_INIT_CB_COUNT 8
#endif
#ifndef PK_EV_GROW_RATIO
# define PK_EV_GROW_RATIO 1.5
#endif

struct pk_ev_cb {
    pk_ev_cb_fn *cb;
    void *user_cb_data;
};

struct pk_ev {
    struct pk_ev_cb *ev_cbs;
    void *user_ev_data;
    atomic_uint_fast8_t n_ev_cbs;
};

struct pk_ev_mgr {
    struct pk_ev *ev;
    atomic_uint_fast8_t n_ev;
    atomic_uint_fast8_t rn_ev;
    atomic_uint_fast8_t rn_cb;
};

struct pk_ev_mstr {
    atomic_uint_fast64_t flg_mgrs; /* bitmask of in-use manager slots */
    atomic_uint_fast64_t rn_mgrs;  /* number of reserved manager slots */
    struct pk_ev_mgr **mgrs;
    mtx_t *mtxs;
};

struct pk_ev_mstr pk_ev_mstr;

void pk_ev_init(void) {
    int i;
    pk_ev_mstr.mgrs = (struct pk_ev_mgr **)malloc(sizeof(void *) * PK_EV_INIT_MGR_COUNT);
    pk_ev_mstr.mtxs = (mtx_t *)malloc(sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT);
    memset(pk_ev_mstr.mgrs, 0, sizeof(void *) * PK_EV_INIT_MGR_COUNT);
    memset(pk_ev_mstr.mtxs, 0, sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT);
    for (i = 0; i < PK_EV_INIT_MGR_COUNT; ++i) {
        mtx_init(&pk_ev_mstr.mtxs[i], mtx_plain);
    }
    atomic_store(&pk_ev_mstr.flg_mgrs, 0);
    atomic_store(&pk_ev_mstr.rn_mgrs, PK_EV_INIT_MGR_COUNT);
}

void pk_ev_teardown(void) {
    int i;
    for (i = 0; i < (int)atomic_load(&pk_ev_mstr.rn_mgrs); ++i) {
        /* test the in-use bitmask (flg_mgrs), not the slot count */
        if ((atomic_load(&pk_ev_mstr.flg_mgrs) & (1ull << i)) == 0)
            continue;
        mtx_lock(&pk_ev_mstr.mtxs[i]);
        free(pk_ev_mstr.mgrs[i]);
        pk_ev_mstr.mgrs[i] = NULL;
        mtx_unlock(&pk_ev_mstr.mtxs[i]);
        mtx_destroy(&pk_ev_mstr.mtxs[i]);
    }
    free(pk_ev_mstr.mgrs);
    free(pk_ev_mstr.mtxs);
    pk_ev_mstr.mgrs = NULL;
    pk_ev_mstr.mtxs = NULL;
}

static struct pk_ev_mgr* pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count) {
    int i;
    struct pk_ev *ev;
    /* one allocation: the manager header, then the pk_ev array, then all
     * callback arrays */
    size_t sz = sizeof(struct pk_ev_mgr)
              + (sizeof(struct pk_ev) * ev_count)
              + (sizeof(struct pk_ev_cb) * ev_count * cb_count);
    size_t sz_ev = sizeof(struct pk_ev_cb) * cb_count;
    size_t sz_evs = sizeof(struct pk_ev) * ev_count;
    struct pk_ev_mgr *mgr = (struct pk_ev_mgr *)malloc(sz);
    if (mgr == NULL)
        goto early_exit;
    memset(mgr, 0, sz);
    mgr->ev = (struct pk_ev *)(((char *)mgr) + sizeof(struct pk_ev_mgr));
    atomic_init(&mgr->rn_ev, ev_count);
    atomic_init(&mgr->rn_cb, cb_count);
    atomic_init(&mgr->n_ev, 0);
    for (i = 0; i < mgr->rn_ev; ++i) {
        ev = &mgr->ev[i];
        atomic_init(&ev->n_ev_cbs, 0);
        ev->ev_cbs = (struct pk_ev_cb *)(((char *)mgr) + sizeof(struct pk_ev_mgr) + sz_evs + (sz_ev * i));
    }
early_exit:
    return mgr;
}

static void pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr) {
    int i;
    struct pk_ev *ev_old;
    struct pk_ev *ev;
    atomic_store(&mgr->n_ev, atomic_load(&old->n_ev));
    for (i = 0; i < old->n_ev; ++i) {
        ev_old = &old->ev[i];
        ev = &mgr->ev[i];
        memcpy(ev->ev_cbs, ev_old->ev_cbs, sizeof(struct pk_ev_cb) * atomic_load(&ev_old->n_ev_cbs));
        atomic_store(&ev->n_ev_cbs, atomic_load(&ev_old->n_ev_cbs));
    }
}

pk_ev_mgr_id_T pk_ev_create_mgr(void) {
    uint64_t i;
    pk_ev_mgr_id_T flg;
    pk_ev_mgr_id_T flg_new;
    pk_ev_mgr_id_T id;
    struct pk_ev_mgr *mgr = pk_ev_inner_ev_mgr_create(PK_EV_INIT_EV_COUNT, PK_EV_INIT_CB_COUNT);
    if (mgr == NULL)
        return (pk_ev_mgr_id_T)-1;
start:
    flg = atomic_load(&pk_ev_mstr.flg_mgrs);
    while (1) {
        flg_new = flg;
        for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) {
            if ((flg & (1ull << i)) == 0)
                break;
        }
        if (i == atomic_load(&pk_ev_mstr.rn_mgrs)) {
            goto recreate;
        }
        id = i;
        flg_new |= (1ull << i);
        if (atomic_compare_exchange_strong(&pk_ev_mstr.flg_mgrs, &flg, flg_new))
            break;
        thrd_yield();
    }
    pk_ev_mstr.mgrs[id] = mgr;
    return id;
recreate:
    // TODO recreate mgr, out of space
    assert(1 == 0 && "[pkev.h] Out of mgr space.");
    exit(1);
    goto start;
}

void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr) {
    assert(evmgr < 64);
    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    free(pk_ev_mstr.mgrs[evmgr]);
    pk_ev_mstr.mgrs[evmgr] = NULL;
    /* release the slot in the in-use bitmask so pk_ev_create_mgr can reuse it */
    atomic_fetch_and(&pk_ev_mstr.flg_mgrs, ~(1ull << evmgr));
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
}

pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data) {
    assert(evmgr < 64);
    pk_ev_id_T id;
    struct pk_ev_mgr *mgr;
    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    if (pk_ev_mstr.mgrs[evmgr]->n_ev == pk_ev_mstr.mgrs[evmgr]->rn_ev) {
        mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev * PK_EV_GROW_RATIO,
                                        pk_ev_mstr.mgrs[evmgr]->rn_cb);
        pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
        free(pk_ev_mstr.mgrs[evmgr]);
        pk_ev_mstr.mgrs[evmgr] = mgr;
    }
    id = pk_ev_mstr.mgrs[evmgr]->n_ev++;
    pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data;
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
    return id;
}

bool pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data) {
    assert(evmgr < 64);
    struct pk_ev_mgr *mgr;
    uint8_t cb_index;
    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    if (pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
        mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev,
                                        pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO);
        pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
        free(pk_ev_mstr.mgrs[evmgr]);
        pk_ev_mstr.mgrs[evmgr] = mgr;
    }
    cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs++;
    pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb;
    pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data;
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
    return true;
}
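/* Illustrative usage sketch for the event system (not part of the library;
 * `on_resize` is a hypothetical callback):
 *
 *     static void on_resize(void *ev_data, void *cb_data, void *emit_data) {
 *         (void)ev_data; (void)cb_data;
 *         int *new_size = (int *)emit_data;
 *         // ... react to *new_size ...
 *     }
 *
 *     pk_ev_init();
 *     pk_ev_mgr_id_T mgr = pk_ev_create_mgr();
 *     pk_ev_id_T resize = pk_ev_register_ev(mgr, NULL);
 *     pk_ev_register_cb(mgr, resize, &on_resize, NULL);
 *     int sz = 42;
 *     pk_ev_emit(mgr, resize, &sz);  // on_resize(NULL, NULL, &sz)
 *     pk_ev_teardown();
 */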
void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data) {
    assert(evmgr < 64);
    uint8_t i;
    for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs; ++i) {
        (*pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb)(
            pk_ev_mstr.mgrs[evmgr]->ev[evid].user_ev_data,
            pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].user_cb_data,
            user_emit_data);
    }
}

#endif /* PK_IMPL_EV */

#endif /* PK_SINGLE_HEADER_FILE_H */