#ifndef PK_SINGLE_HEADER_FILE_H #define PK_SINGLE_HEADER_FILE_H /******************************************************************************* * PK Single-Header-Library V0.4.4 * * Author: Jonathan Bradley * Copyright: © 2024-2025 Jonathan Bradley * Description: * * A collection of useful programming tools, available for C and C++ as a * single-header file. To enable, in ONE single C or C++ file, declare * PK_IMPL_ALL before including pk.h. * * Example: * * pk.h.include.c * ``` c * #define PK_IMPL_ALL * #include "pk.h" * ``` * * It is also possible to enable modules ad-hoc by defining each IMPL * individually: * * pk.h.include.c * ``` c * # define PK_IMPL_MEM_TYPES * # define PK_IMPL_MEM * # define PK_IMPL_STR * # define PK_IMPL_EV * # define PK_IMPL_ARR * # define PK_IMPL_STN * #include "pk.h" * ``` * ******************************************************************************** * pkmacros.h: * * Provides a set of useful macros for a variety of uses. * * The macros PK_LOG* provide simple logging utilities. These can be overridden * by providing your own implementations of each and defining PK_LOG_OVERRIDE * before including pk.h Note that each of these are no-op'd if NDEBUG is * defined. * * The TypeSafeInt_H and TypeSafeInt_B macros provide a way to define * type-specific integers, implemented via enums. * ******************************************************************************** * pkmem-types.h: def PK_IMPL_MEM_TYPES before including pk.h to enable ad-hoc. * * Provides the types needed by pkmem, as well as a generic pk_handle featuring a * bucket+item indexing system. * ******************************************************************************** * pkmem.h: def PK_IMPL_MEM before including pk.h to enable ad-hoc. * * A bucketed memory manager. Allows for the creation and management of up to a * well-defined number of buckets. * * Thread safety: Bucket creation and destruction is *not* thread-safe. 
On the * other hand, the "pk_new" and "pk_delete" methods *are* thread-safe, but * thread-safety is implemented per-bucket via a single mutex with long-running * lock times. PRs for a more performant thread-safe strategy are welcome, * complexity and benchmark depending. * * The following definitions (shown with defaults) can be overridden: * PK_DEFAULT_BUCKET_SIZE 256MB (used when bkt is NULL on first call) * PK_MINIMUM_ALIGNMENT 1 * PK_MAXIMUM_ALIGNMENT 64 * PK_MAX_BUCKET_COUNT 8 * * For debugging purposes, define the following: * PK_MEMORY_DEBUGGER : enables a tracking system for all allocs and frees to * ensure bucket validity and consistency. * PK_MEMORY_FORCE_MALLOC : completely disables pkmem and its debugging features * in favor of directly using malloc and free. Useful for out-of-bounds * checking. * ******************************************************************************** * pkstr.h: def PK_IMPL_STR before including pk.h to enable ad-hoc. * * Provides a simple string structure, allowing the user to track the string * length and reserved buffer length. Limits max string length to uint32_t max * size, which is roughly 4GB. * * Tip: set reserved to 0 for compile-time strings as well as for strings alloc'd * in a larger buffer (such as bulk-loaded data). * ******************************************************************************** * pkev.h: def PK_IMPL_EV before including pk.h to enable ad-hoc. * * Provides a simple event callback system. While the _init and _teardown * functions are NOT thread-safe, the _register and _emit functions are. * Note: uses malloc. * * Each mgr is stored contiguously with its data. 
Consider the following layout: * [[mgr][ev 0][ev 1][..][ev N][ev 1 cb array][ev 2 cb array][..][ev N cb array]] * * The following definitions (shown with defaults) can be overridden: * PK_EV_INIT_MGR_COUNT 1 * PK_EV_INIT_EV_COUNT 16 * PK_EV_INIT_CB_COUNT 8 * PK_EV_GROW_RATIO 1.5 * * The number of evs and cbs (per ev) is stored as a uint8_t, so a hard-limit of * 255 is to be observed for each. The number of mgrs is stored as a uint64_t. * * Note that PK_EV_GROW_RATIO is used in two scenarios: * 1. When registering an ev on a full mgr. * 2. When registering a cb on a full ev. * The grow ratio is applied to the ev count and cb count in their respective * scenarios. This causes a new allocation for the entire mgr. The existing * mgr and its evs and cbs are copied to the new larger buffer space. * Explicitly, the number of mgrs does not grow dynamically. Use * PK_EV_INIT_MGR_COUNT to control the number of mgrs. * * Note that increasing PK_EV_INIT_MGR_COUNT isn't recommended, but you may * consider doing so if you have specific size or contiguity requirements. For * example, you could -DPK_EV_INIT_EV_COUNT=1 to reduce the memory footprint of * each event/mgr, and simply create a new mgr for each needed event. Be aware * that in this provided scenario a given mgr will still grow if a second EV is * registered. * ******************************************************************************** * pkarr.h: def PK_IMPL_ARR before including pk.h to enable ad-hoc * * Provides a structure for managing contiguous lists * * The following definitions (shown with defaults) can be overridden: * PK_ARR_INITIAL_COUNT 16 * PK_ARR_GROW_RATIO 1.5 * PK_ARR_MOVE_IN_PLACE (not defined) * * The macro `PK_ARR_MOVE_IN_PLACE` ensures that when possible, the pointer value * of `arr->data` is preserved. * It is used in the following methods: * `pk_arr_move_to_back` * `pk_arr_remove_at` * This has two additinal benefits: * 1. Minimizing the number and `sz` of calls to `pk_new` * 2. 
Ensuring `data[0]` to `data[(N - 1) * stride]` is not copied extraneously * to a new buffer. * The speed of this will vary depending on usage, platform, and compiler. * * Initialize `stride`, `alignment`, and `bkt` (optional) members * *before* calling any `pk_arr_*` methods. * Alternatively, if using c++, use the template ctor. * * Examples: * ``` c * struct pk_arr arr = {0}; * arr.stride = sizeof(obj); // required * arr.alignment = alignof(obj); // required * arr.bkt = bkt; // optional * pk_arr_reserve(&arr, 10); // optional * pk_arr_append(&arr, &obj); * ``` * ``` c++ * struct pk_arr arr(bkt); * pk_arr_reserve(&arr, 10); // optional * pk_arr_append(&arr, &obj); * ``` * ``` c * struct pk_arr arr = {0}; * arr.stride = sizeof(obj); // required * arr.alignment = alignof(obj); // required * arr.bkt = bkt; // optional * pk_arr_resize(&arr, 10); * obj* d = (obj*)arr->data; * d[0] = ...; * ``` * ``` c++ * struct pk_arr_t arr(); * pk_arr_resize(&arr, 10); * arr[0] = {}; * ``` * ******************************************************************************** * pkstn.h: def PK_IMPL_STN before including pk.h to enable ad-hoc. * * Provides a thorough interface for interacting with the `stoi` family of * procedures. * ******************************************************************************** * pktmr.h: No IMPL define, all methods are macros. * * Offers a set of `pk_tmr*` macros for elapsed time checking. * * The following definitions (shown with defaults) can be overridden: * PK_TMR_CLOCK CLOCK_MONOTONIC * * If your needs require you to use more than one clock, I recommend calling * `clock_gettime` manually instead of calling `pk_tmr_start`/`pk_tmr_stop`. * `pk_tmr.b` is the start time. * `pk_tmr.e` end the end time. * You could then call the `pk_tmr_duration...` convenience macros as needed. * ******************************************************************************** * pkuuid.h: define PK_IMPL_UUID before including pk.h to enable ad-hoc. 
* * Provides a 16-byte unsigned char array struct for uuids. * * The following definitions (shown with defaults) can be overridden: * PK_UUID_CLOCK CLOCK_TAI (preferred, if available) * PK_UUID_CLOCK CLOCK_REALTIME (fallback) * * The `PK_UUID_CLOCK` macro has minimal built-in fallback logic. * The uuidv7 specification states that the timestamp portion of the uuid must be * a unix epoch, leap seconds EXCLUDED. Only `CLOCK_TAI` meets this requirement * on Linux. * * Note that this currectly calls `srand()` once at startup, and calls `rand()` * 2 times for each uuidv7 to fill 74 bits with random data (with an XOR for the * remaining 10 bits). * ******************************************************************************** * pkbktarr.h: define PK_IMPL_BKTARR before including pk.h to enable ad-hoc. * * Provides a struct for bucketed data allocation. * * Maximum (default) bucket limits are as follows: * buckets: 0xFFFFFF (16777215) * items/bucket: 0x40 (64) * * Note that you may specify separate `pk_membucket`s for the the struct's * arrays `bucketed_data` + `idx_unused`, and the actual bucketed array data * found within `bucketed_data`. * If the `pk_membucket` for "data" is exclusive to this struct, each bucket (and * by extension, the data) will be contiguious in memory. 
* * Examples: * ```c * struct pk_bkt_arr_handle custom_limits; * custom_limits.b = 8; * custom_limits.i = 8; * struct pk_bkt_arr arr; * pk_bkt_arr_init( * &arr, sizeof(int), alignof(int), custom_limits, bkt_buckets, bkt_data); * struct pk_bkt_arr_handle h = pk_bkt_arr_new_handle(&arr); * int **int_ptrs = (int**)arr.bucketed_data; * int_ptrs[h.b][h.i] = 128; * pk_bkt_arr_free_handle(&arr, h); * pk_bkt_arr_teardown(&arr); * ``` * ```c++ * // default limits, no pk_membucket * struct pk_bkt_arr arr(); * struct pk_bkt_arr_handle h = pk_bkt_arr_new_handle(&arr); * arr[h] = 128; * pk_bkt_arr_free_handle(&arr, h); * arr.~pk_bkt_arr(); // manually call dtor for globals * ``` * *******************************************************************************/ #define PK_VERSION "0.4.4" #ifdef PK_IMPL_ALL # ifndef PK_IMPL_MEM_TYPES # define PK_IMPL_MEM_TYPES # endif # ifndef PK_IMPL_MEM # define PK_IMPL_MEM # endif # ifndef PK_IMPL_STR # define PK_IMPL_STR # endif # ifndef PK_IMPL_EV # define PK_IMPL_EV # endif # ifndef PK_IMPL_ARR # define PK_IMPL_ARR # endif # ifndef PK_IMPL_STN # define PK_IMPL_STN # endif # ifndef PK_IMPL_UUID # define PK_IMPL_UUID # endif # ifndef PK_IMPL_BKTARR # define PK_IMPL_BKTARR # endif #endif #ifndef PK_MACROS_H #define PK_MACROS_H #ifndef PK_LOG_OVERRIDE # ifdef NDEBUG # define PK_LOG_ERR(str) (void)str # define PK_LOG_INF(str) (void)str # define PK_LOGV_ERR(str, ...) (void)str # define PK_LOGV_INF(str, ...) (void)str # else # define PK_LOG_ERR(str) fprintf(stderr, str) # define PK_LOG_INF(str) fprintf(stdout, str) # define PK_LOGV_ERR(str, ...) fprintf(stderr, str, __VA_ARGS__) # define PK_LOGV_INF(str, ...) fprintf(stdout, str, __VA_ARGS__) # endif #endif #define PK_Q(x) #x #define PK_QUOTE(x) PK_Q(x) #define PK_CONCAT2(x, y) x##y #define PK_CONCAT(x, y) PK_CONCAT2(x, y) #define PK_HAS_FLAG(val, flag) ((val & flag) == flag) #define PK_CLAMP(val, min, max) (val < min ? min : val > max ? max : val) #define PK_MIN(val, min) (val < min ? 
val : min) #define PK_MAX(val, max) (val > max ? val : max) #define PK_TO_BIN_PAT PK_Q(%c%c%c%c%c%c%c%c) #define PK_TO_BIN_PAT_8 PK_TO_BIN_PAT #define PK_TO_BIN_PAT_16 PK_TO_BIN_PAT PK_TO_BIN_PAT #define PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_16 PK_TO_BIN_PAT_16 #define PK_TO_BIN_PAT_64 PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_32 #define PK_TO_BIN(byte) \ ((byte) & 0x80 ? '1' : '0'), \ ((byte) & 0x40 ? '1' : '0'), \ ((byte) & 0x20 ? '1' : '0'), \ ((byte) & 0x10 ? '1' : '0'), \ ((byte) & 0x08 ? '1' : '0'), \ ((byte) & 0x04 ? '1' : '0'), \ ((byte) & 0x02 ? '1' : '0'), \ ((byte) & 0x01 ? '1' : '0') #define PK_TO_BIN_8(u8) PK_TO_BIN(u8) #define PK_TO_BIN_16(u16) PK_TO_BIN((u16 >> 8)), PK_TO_BIN(u16 & 0x00FF) #define PK_TO_BIN_32(u32) PK_TO_BIN_16((u32 >> 16)), PK_TO_BIN_16(u32 & 0x0000FFFF) #define PK_TO_BIN_64(u64) PK_TO_BIN_32((u64 >> 32)), PK_TO_BIN_32(u64 & 0x00000000FFFFFFFF) #if defined(__cplusplus) # define CAFE_BABE(T) reinterpret_cast(0xCAFEBABE) #else # define CAFE_BABE(T) (T *)(0xCAFEBABE) #endif #define NULL_CHAR_ARR(v, len) char v[len]; v[0] = '\0'; v[len-1] = '\0'; #define IS_CONSTRUCTIBLE(T) constexpr(std::is_default_constructible::value && !std::is_integral::value && !std::is_floating_point::value) #define IS_DESTRUCTIBLE(T) constexpr(std::is_destructible::value && !std::is_integral::value && !std::is_floating_point::value && !std::is_array::value) #define TypeSafeInt2_H(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \ using TypeName_T = Type; \ enum class TypeName : TypeName_T; \ constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max}; \ constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX}; \ TypeName operator+(const TypeName& a, const TypeName& b); \ TypeName operator-(const TypeName& a, const TypeName& b); \ TypeName operator*(const TypeName& a, const TypeName& b); \ TypeName operator/(const TypeName& a, const TypeName& b); \ TypeName operator&(const TypeName& a, const TypeName& b); \ TypeName operator|(const TypeName& a, const TypeName& b); \ 
TypeName operator^(const TypeName& a, const TypeName& b); \ TypeName& operator++(TypeName& a); \ TypeName& operator--(TypeName& a); \ TypeName operator++(TypeName& a, int); \ TypeName operator--(TypeName& a, int); \ TypeName operator<<(const TypeName& a, const TypeName& b); \ TypeName operator>>(const TypeName& a, const TypeName& b); \ TypeName operator+=(TypeName& a, const TypeName& b); \ TypeName operator-=(TypeName& a, const TypeName& b); \ TypeName operator*=(TypeName& a, const TypeName& b); \ TypeName operator/=(TypeName& a, const TypeName& b); \ TypeName operator&=(TypeName& a, const TypeName& b); \ TypeName operator|=(TypeName& a, const TypeName& b); \ TypeName operator^=(TypeName& a, const TypeName& b); \ TypeName operator~(TypeName& a); #define TypeSafeInt2_B(TypeName, TypeName_T) \ inline TypeName operator+(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) + static_cast(b)); \ } \ inline TypeName operator-(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) - static_cast(b)); \ } \ inline TypeName operator*(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) * static_cast(b)); \ } \ inline TypeName operator/(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) / static_cast(b)); \ } \ inline TypeName operator&(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) & static_cast(b)); \ } \ inline TypeName operator|(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) | static_cast(b)); \ } \ inline TypeName operator^(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) ^ static_cast(b)); \ } \ inline TypeName& operator++(TypeName& a) { \ a = a + TypeName{1}; \ return a; \ } \ inline TypeName& operator--(TypeName& a) { \ a = a - TypeName{1}; \ return a; \ }; \ inline TypeName operator++(TypeName& a, int) { \ a = a + TypeName{1}; \ return a; \ } \ inline TypeName operator--(TypeName& a, int) { \ a = a - TypeName{1}; \ 
return a; \ }; \ inline TypeName operator<<(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) << static_cast(b)); \ }; \ inline TypeName operator>>(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) >> static_cast(b)); \ }; \ inline TypeName operator+=(TypeName& a, const TypeName& b) { \ a = TypeName{a + b}; \ return a; \ }; \ inline TypeName operator-=(TypeName& a, const TypeName& b) { \ a = TypeName{a - b}; \ return a; \ }; \ inline TypeName operator*=(TypeName& a, const TypeName& b) { \ a = TypeName{a * b}; \ return a; \ }; \ inline TypeName operator/=(TypeName& a, const TypeName& b) { \ a = TypeName{a / b}; \ return a; \ }; \ inline TypeName operator&=(TypeName& a, const TypeName& b) { \ a = TypeName{a & b}; \ return a; \ }; \ inline TypeName operator|=(TypeName& a, const TypeName& b) { \ a = TypeName{a | b}; \ return a; \ }; \ inline TypeName operator^=(TypeName& a, const TypeName& b) { \ a = TypeName{a ^ b}; \ return a; \ }; \ inline TypeName operator~(TypeName& a) { \ a = static_cast(~static_cast(a)); \ return a; \ }; #define TypeSafeInt_H(TypeName, Type, Max) \ TypeSafeInt2_H(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX)) #define TypeSafeInt_B(TypeName) \ TypeSafeInt2_B(TypeName, PK_CONCAT(TypeName, _T)) #define TypeSafeInt2_H_constexpr(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \ using TypeName_T = Type; \ enum class TypeName : TypeName_T; \ constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max}; \ constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX}; \ constexpr TypeName operator+(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) + static_cast(b)); \ } \ constexpr TypeName operator-(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) - static_cast(b)); \ } \ constexpr TypeName operator*(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) * static_cast(b)); \ } \ 
constexpr TypeName operator/(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) / static_cast(b)); \ } \ constexpr TypeName operator&(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) & static_cast(b)); \ } \ constexpr TypeName operator|(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) | static_cast(b)); \ } \ constexpr TypeName operator^(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) ^ static_cast(b)); \ } \ constexpr TypeName& operator++(TypeName& a) { \ a = a + TypeName{1}; \ return a; \ } \ constexpr TypeName& operator--(TypeName& a) { \ a = a - TypeName{1}; \ return a; \ }; \ constexpr TypeName operator++(TypeName& a, int) { \ a = a + TypeName{1}; \ return a; \ } \ constexpr TypeName operator--(TypeName& a, int) { \ a = a - TypeName{1}; \ return a; \ }; \ constexpr TypeName operator<<(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) << static_cast(b)); \ }; \ constexpr TypeName operator>>(const TypeName& a, const TypeName& b) { \ return TypeName(static_cast(a) >> static_cast(b)); \ }; \ constexpr TypeName operator+=(TypeName& a, const TypeName& b) { \ a = TypeName{a + b}; \ return a; \ }; \ constexpr TypeName operator-=(TypeName& a, const TypeName& b) { \ a = TypeName{a - b}; \ return a; \ }; \ constexpr TypeName operator*=(TypeName& a, const TypeName& b) { \ a = TypeName{a * b}; \ return a; \ }; \ constexpr TypeName operator/=(TypeName& a, const TypeName& b) { \ a = TypeName{a / b}; \ return a; \ }; \ constexpr TypeName operator&=(TypeName& a, const TypeName& b) { \ a = TypeName{a & b}; \ return a; \ }; \ constexpr TypeName operator|=(TypeName& a, const TypeName& b) { \ a = TypeName{a | b}; \ return a; \ }; \ constexpr TypeName operator^=(TypeName& a, const TypeName& b) { \ a = TypeName{a ^ b}; \ return a; \ }; \ constexpr TypeName operator~(const TypeName& a) { \ return static_cast(~static_cast(a)); \ }; #define 
TypeSafeInt_constexpr(TypeName, Type, Max) \ TypeSafeInt2_H_constexpr(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX)) #endif /* PK_MACROS_H */ #ifndef PK_MEM_TYPES_H #define PK_MEM_TYPES_H #include typedef uint32_t pk_handle_bucket_index_T; typedef uint32_t pk_handle_item_index_T; enum PK_HANDLE_VALIDATION : uint8_t { PK_HANDLE_VALIDATION_VALID = 0, PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH = 1, PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH = 2, PK_HANDLE_VALIDATION_VALUE_MAX = 3, }; struct pk_handle { pk_handle_bucket_index_T bucketIndex; pk_handle_item_index_T itemIndex; }; #if ! defined(__cplusplus) #define PK_HANDLE_MAX ((struct pk_handle){ .bucketIndex = 0xFFFFFFFF, .itemIndex = 0xFFFFFFFF }) #else #define PK_HANDLE_MAX (pk_handle{ 0xFFFFFFFF, 0xFFFFFFFF }) #endif enum PK_HANDLE_VALIDATION pk_handle_validate(const struct pk_handle handle, const struct pk_handle bucketHandle, const uint64_t maxItems); #if defined(__cplusplus) constexpr struct pk_handle pk_handle_MAX_constexpr = { 0xFFFFFFFF, 0xFFFFFFFF }; inline constexpr bool operator==(const pk_handle& lhs, const pk_handle& rhs) { return lhs.bucketIndex == rhs.bucketIndex && lhs.itemIndex == rhs.itemIndex; } template inline constexpr enum PK_HANDLE_VALIDATION pk_handle_validate_constexpr() { if constexpr (handle == pk_handle_MAX_constexpr) return PK_HANDLE_VALIDATION_VALUE_MAX; if constexpr (handle.bucketIndex > bucketHandle.bucketIndex) return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH; if constexpr (handle.itemIndex > maxItems) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH; if constexpr (handle.bucketIndex == bucketHandle.bucketIndex && handle.itemIndex > bucketHandle.itemIndex) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH; return PK_HANDLE_VALIDATION_VALID; } #endif /* __cplusplus */ struct pk_membucket; #endif /* PK_MEM_TYPES_H */ #ifdef PK_IMPL_MEM_TYPES enum PK_HANDLE_VALIDATION pk_handle_validate(const struct pk_handle handle, const struct 
pk_handle bucketHandle, const uint64_t maxItems) { if (handle.bucketIndex == PK_HANDLE_MAX.bucketIndex && handle.itemIndex == PK_HANDLE_MAX.itemIndex) return PK_HANDLE_VALIDATION_VALUE_MAX; if (handle.bucketIndex > bucketHandle.bucketIndex) return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH; if (handle.itemIndex > maxItems) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH; if (handle.bucketIndex == bucketHandle.bucketIndex && handle.itemIndex > bucketHandle.itemIndex) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH; return PK_HANDLE_VALIDATION_VALID; } #endif /* PK_IMPL_MEM_TYPES */ #ifndef PK_MEM_H #define PK_MEM_H #include #include #ifndef PK_DEFAULT_BUCKET_SIZE # define PK_DEFAULT_BUCKET_SIZE (1ULL * 1024ULL * 1024ULL * 256ULL) #endif #ifndef PK_MINIMUM_ALIGNMENT # define PK_MINIMUM_ALIGNMENT 1 #endif #ifndef PK_MAXIMUM_ALIGNMENT # define PK_MAXIMUM_ALIGNMENT 64 #endif struct pk_membucket* pk_bucket_create(const char* description, int64_t sz, bool transient); void pk_bucket_destroy(struct pk_membucket* bkt); void pk_bucket_reset(struct pk_membucket* bkt); void pk_memory_debug_print(); void pk_memory_flush(); void pk_memory_teardown_all(); bool pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt); void* pk_new_base(size_t sz, size_t alignment); void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt); void* pk_new(size_t sz, size_t alignment, struct pk_membucket* bkt); void pk_delete_base(const void* ptr, size_t sz); void pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt); void pk_delete(const void* ptr, size_t sz, struct pk_membucket* bkt); #if defined(__cplusplus) #include static inline void stupid_header_warnings_cpp() { (void)std::is_const::value; } template inline T* pk_new(pk_membucket* bucket = nullptr) { void* ptr = nullptr; if (bucket) { ptr = pk_new_bkt(sizeof(T), alignof(T), bucket); } else { ptr = pk_new_base(sizeof(T), alignof(T)); } if IS_CONSTRUCTIBLE(T) { return new (ptr) T{}; } return 
reinterpret_cast(ptr); } template inline T* pk_new(long count, pk_membucket* bucket = nullptr) { char* ptr = nullptr; if (bucket) { ptr = static_cast(pk_new_bkt(sizeof(T) * count, alignof(T), bucket)); } else { ptr = static_cast(pk_new_base(sizeof(T) * count, alignof(T))); } if IS_CONSTRUCTIBLE(T) { for (long i = 0; i < count; ++i) { new (ptr + (i * sizeof(T))) T{}; } } return reinterpret_cast(ptr); } template inline void pk_delete(const T* ptr, pk_membucket* bucket = nullptr) { if IS_DESTRUCTIBLE(T) { reinterpret_cast(ptr)->~T(); } if (bucket) { return pk_delete_bkt(static_cast(ptr), sizeof(T), bucket); } else { return pk_delete_base(static_cast(ptr), sizeof(T)); } } template inline void pk_delete(const T* ptr, long count, pk_membucket* bucket = nullptr) { if IS_DESTRUCTIBLE(T) { for (long i = 0; i < count; ++i) { reinterpret_cast(reinterpret_cast(ptr) + (i * sizeof(T)))->~T(); } } if (bucket) { return pk_delete_bkt(static_cast(ptr), sizeof(T) * count, bucket); } else { return pk_delete_base(static_cast(ptr), sizeof(T) * count); } } #endif /* __cplusplus */ #endif /* PK_MEM */ #ifdef PK_IMPL_MEM #include #include #include #include static inline void pkmem_stupid_header_warnings() { (void)stdout; } #if defined(PK_MEMORY_DEBUGGER) /* * Note that certain aspects of this expect that you only have one non-transient bucket. * If you need to track multiple non-transient buckets, these sections will need a refactor. */ #endif #ifndef PK_MAX_BUCKET_COUNT # define PK_MAX_BUCKET_COUNT 8 #endif struct pk_memblock { char* data; size_t size; }; struct pk_membucket { // the total size of the bucket, `blocks+ptr` size_t size; // the current head of the bucket: byte offset from `ptr`. // All currently alloc'd data is before this offset size_t head; // amount of lost bytes in this membucket, hopefully zero size_t lostBytes; // the number of active allocations from this bucket size_t allocs; // the index of the last empty block. 
// Should always point to `pk_memblock{ .data = ptr+head, .size=size-head }` size_t lastEmptyBlockIndex; // number of pk_memblocks in the `*blocks` array size_t maxBlockCount; // ptr to an array of pk_memblock to track ALL free space between ptr and ptr+sz struct pk_memblock* blocks; // starting point for alloc'd data union { char* ptr; void* raw; }; const char* description; mtx_t mtx; bool transient; }; static struct pk_membucket pk_buckets[PK_MAX_BUCKET_COUNT]; static size_t pk_bucket_head = 0; #ifdef PK_MEMORY_DEBUGGER struct pk_dbg_memblock { struct pk_memblock blk; struct pk_membucket *bkt; }; static struct pk_dbg_memblock debug_all_allocs[1024 * 1024]; static size_t debug_alloc_head = 0; static bool has_init_debug = false; #endif bool pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt) { if (ptr >= bkt->raw && (const char*)ptr < bkt->ptr + bkt->size) return true; return false; } void pk_memory_debug_print() { PK_LOGV_INF("Memory Manager printout:\nBucket count: %li\n", pk_bucket_head); for (size_t i = 0; i < pk_bucket_head; ++i) { PK_LOGV_INF("- bucket #%li\n", i); PK_LOGV_INF("\tdescription: %s\n", pk_buckets[i].description); PK_LOGV_INF("\tsize: %li\n", pk_buckets[i].size); PK_LOGV_INF("\thead: %li\n", pk_buckets[i].head); PK_LOGV_INF("\tlostBytes: %li\n", pk_buckets[i].lostBytes); PK_LOGV_INF("\tallocs: %li\n", pk_buckets[i].allocs); PK_LOGV_INF("\tlastEmptyBlockIndex: %li\n", pk_buckets[i].lastEmptyBlockIndex); PK_LOGV_INF("\tmaxBlockCount: %li\n", pk_buckets[i].maxBlockCount); PK_LOGV_INF("\tblocks: %p\n", (void *)pk_buckets[i].blocks); PK_LOGV_INF("\tptr: %p\n", (void *)pk_buckets[i].ptr); PK_LOGV_INF("\ttransient: %i\n", pk_buckets[i].transient); #ifdef PK_MEMORY_DEBUGGER uint64_t count = 0; for (size_t d = 0; d < debug_alloc_head; ++d) { if (debug_all_allocs[d].bkt == &pk_buckets[d] && debug_all_allocs[d].blk.size > 0) { count += 1; } } PK_LOGV_INF("\tdebug alloc count: %lu\n", count); PK_LOGV_INF("\tdebug alloc last: %lu\n", 
debug_alloc_head); #endif } } void pk_memory_flush() { for (long i = pk_bucket_head - 1; i > -1; --i) { if (pk_buckets[i].head != 0) break; if (pk_buckets[i].transient == true) break; pk_bucket_head--; if (pk_buckets[i].raw == CAFE_BABE(void)) continue; pk_bucket_destroy(&pk_buckets[i]); } } void pk_memory_teardown_all() { for (int64_t i = pk_bucket_head; i > 0; --i) { if (pk_buckets[i - 1].ptr == nullptr) continue; if (pk_buckets[i - 1].ptr == CAFE_BABE(char)) continue; pk_bucket_destroy(&pk_buckets[i - 1]); } pk_bucket_head = 0; } static int64_t pk_bucket_create_inner(int64_t sz, bool transient, const char* description) { assert(pk_bucket_head < PK_MAX_BUCKET_COUNT && "pkmem.h: reserved bucket count exceeded"); #ifdef PK_MEMORY_DEBUGGER if (has_init_debug == false) { has_init_debug = true; memset(debug_all_allocs, 0, sizeof(struct pk_dbg_memblock) * 1024 * 1024); } #endif int64_t blockCount = sz * 0.01; struct pk_membucket* bkt = &pk_buckets[pk_bucket_head]; bkt->size = sz; bkt->head = 0; bkt->lostBytes = 0; bkt->allocs = 0; bkt->lastEmptyBlockIndex = 0; bkt->maxBlockCount = blockCount < 10 ? 
10 : blockCount;
  bkt->blocks = (struct pk_memblock*)malloc(sz);
  mtx_init(&bkt->mtx, mtx_plain);
  assert(bkt->blocks != nullptr && "failed to allocate memory");
#if 1
  memset(bkt->blocks, 0, sz);
#endif
  // The block-tracking array sits at the front of the allocation; user data
  // starts immediately after the last pk_memblock slot.
  bkt->ptr = ((char*)(bkt->blocks)) + (sizeof(struct pk_memblock) * bkt->maxBlockCount);
  size_t misalignment = (size_t)(bkt->ptr) % PK_MAXIMUM_ALIGNMENT;
  if (misalignment != 0) {
    // Push `ptr` up to a PK_MAXIMUM_ALIGNMENT boundary, and grow the block
    // array into (part of) the skipped bytes so the space is not wasted.
    size_t moreBlocks = misalignment / sizeof(struct pk_memblock);
    bkt->maxBlockCount += moreBlocks;
    bkt->ptr += (PK_MAXIMUM_ALIGNMENT - misalignment);
  }
  bkt->description = description;
  bkt->transient = transient;
  // Seed the free list with one block spanning the whole data region.
  struct pk_memblock* memBlock = (struct pk_memblock*)(bkt->blocks);
  memBlock->data = bkt->ptr;
  memBlock->size = sz - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
  return pk_bucket_head++;
}

// Public wrapper: creates a bucket in the next free global slot and returns it.
struct pk_membucket* pk_bucket_create(const char* description, int64_t sz, bool transient) {
  return &pk_buckets[pk_bucket_create_inner(sz, transient, description)];
}

// Frees a bucket's single backing allocation and poisons its fields with
// CAFE_BABE so later scans can recognize the dead slot.
// NOTE(review): pk_bucket_head only shrinks when the destroyed bucket is the
// last live slot; earlier slots stay reserved until a tail flush -- presumably
// intentional, verify against pk_memory_flush.
void pk_bucket_destroy(struct pk_membucket* bkt) {
  size_t i;
  for (i = 0; i < pk_bucket_head; ++i) {
    if (&pk_buckets[i] == bkt) {
      if (pk_bucket_head == i + 1) pk_bucket_head--;
      break;
    }
  }
  free(bkt->blocks);
  bkt->size = 0;
  bkt->head = 0;
  bkt->lostBytes = 0;
  bkt->allocs = 0;
  bkt->lastEmptyBlockIndex = 0;
  bkt->maxBlockCount = 0;
  bkt->blocks = CAFE_BABE(struct pk_memblock);
  bkt->ptr = CAFE_BABE(char);
  bkt->transient = false;
  mtx_destroy(&bkt->mtx);
#ifdef PK_MEMORY_DEBUGGER
  // Drop every debug-tracker entry that pointed into this bucket.
  size_t ii;
  for (ii = debug_alloc_head+1; ii > 0; --ii) {
    i = ii-1;
    if (debug_all_allocs[i].bkt == bkt) {
      debug_all_allocs[i].blk.data = NULL;
      debug_all_allocs[i].blk.size = 0u;
    }
  }
#endif
}

// Resets a (transient) bucket to empty without freeing its backing memory.
// Logs a warning for non-transient buckets but resets them anyway.
void pk_bucket_reset(struct pk_membucket* bkt) {
#ifdef PK_MEMORY_DEBUGGER
  int64_t i;
#endif
  if (bkt->transient != true) {
    PK_LOG_ERR("WARNING: pk_bucket_reset called on non-transient pk_membucket\n");
  }
  bkt->head = 0;
  bkt->lostBytes = 0;
  bkt->allocs = 0;
  bkt->lastEmptyBlockIndex = 0;
  // Re-seed the free list with a single block covering the full data region.
  bkt->blocks->data = bkt->ptr;
  bkt->blocks->size = bkt->size - (sizeof(struct pk_memblock) *
    bkt->maxBlockCount);
#ifdef PK_MEMORY_DEBUGGER
  for (i = debug_alloc_head; i > -1; --i) {
    if (debug_all_allocs[i].bkt == bkt) {
      debug_all_allocs[i].blk.data = NULL;
      debug_all_allocs[i].blk.size = 0u;
    }
  }
#endif
}

// Inserts `block` into `bkt`'s free-block array, keeping it sorted by `data`
// address (insertion-sort shift from the tail toward the front).
void pk_bucket_insert_block(struct pk_membucket* bkt, const struct pk_memblock* block) {
  int64_t index = bkt->lastEmptyBlockIndex;
  while (index >= 0) {
    struct pk_memblock* b = &bkt->blocks[index];
    struct pk_memblock* nb = &bkt->blocks[index + 1];
    if (b->data < block->data) {
      break;
    }
    // Shift this entry one slot up to make room.
    nb->data = b->data;
    nb->size = b->size;
    index -= 1;
  }
  struct pk_memblock *b = &bkt->blocks[index + 1];
  b->data = block->data;
  b->size = block->size;
  bkt->lastEmptyBlockIndex += 1;
}

// Removes zero-sized entries from the free-block array, shifting later
// entries down.  Walks from the tail so trailing empties are trimmed first.
// NOTE(review): assumes at least one live block remains; if the only block
// (index 0) is empty, lastEmptyBlockIndex (size_t) would wrap -- TODO confirm
// the callers' invariant guarantees this cannot happen.
void pk_bucket_collapse_empty_blocks(struct pk_membucket* bkt) {
  size_t i, ii;
  for (ii = bkt->lastEmptyBlockIndex+1; ii > 0; --ii) {
    i = ii-1;
    struct pk_memblock* block = &bkt->blocks[i];
    if (block->size == 0 && i == bkt->lastEmptyBlockIndex) {
      // Tail entry is empty: just retire it.
      block->data = nullptr;
      bkt->lastEmptyBlockIndex -= 1;
      continue;
    }
    if (block->size > 0) {
      continue;
    }
    // Interior empty entry: compact the array over it.
    for (size_t k = i; k < bkt->lastEmptyBlockIndex; ++k) {
      bkt->blocks[k].data = bkt->blocks[k + 1].data;
      bkt->blocks[k].size = bkt->blocks[k + 1].size;
    }
    bkt->lastEmptyBlockIndex -= 1;
  }
}

// Allocates `sz` bytes with `alignment` from `bkt`, first-fit over the sorted
// free-block array.  Thread-safe per bucket via `bkt->mtx` (see file header).
void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt) {
#ifdef PK_MEMORY_FORCE_MALLOC
  return malloc(sz);
#endif
  if (sz == 0) return nullptr;
  assert((bkt->size - bkt->head) > (sz + alignment -1) && "Not enough space in bucket");
  size_t i;
  size_t calculatedAlignment = alignment < PK_MINIMUM_ALIGNMENT ?
PK_MINIMUM_ALIGNMENT : alignment; size_t misalignment = 0; struct pk_memblock* prevBlock = nullptr; struct pk_memblock* block = nullptr; struct pk_memblock* nextBlock = nullptr; void* data = nullptr; mtx_lock(&bkt->mtx); for (i = 0; i <= bkt->lastEmptyBlockIndex; ++i) { struct pk_memblock* blk = &bkt->blocks[i]; misalignment = (size_t)(blk->data) % calculatedAlignment; misalignment = (calculatedAlignment - misalignment) % calculatedAlignment; if (blk->size >= sz + misalignment) { block = blk; if (i < bkt->lastEmptyBlockIndex && bkt->blocks[i + 1].data == block->data + block->size) { nextBlock = &bkt->blocks[i + 1]; } if (i > 0 && i != bkt->lastEmptyBlockIndex && (bkt->blocks[i-1].data + bkt->blocks[i-1].size) == block->data) { prevBlock = &bkt->blocks[i - 1]; } break; } } if (block == nullptr) { mtx_unlock(&bkt->mtx); assert(block != nullptr && "memory corruption: not enough space in chosen bkt"); } data = block->data + misalignment; #ifdef PK_MEMORY_DEBUGGER bool handled = bkt->transient; if (handled == false) { for (i = 0; i < debug_alloc_head; ++i) { struct pk_dbg_memblock* mb = &debug_all_allocs[i]; if (mb->bkt != NULL) continue; assert((mb->blk.size == 0 || (void*)(mb->blk.data) != data) && "mem address alloc'd twice!"); if (mb->blk.size == 0) { mb->blk.data = (char*)(data); mb->blk.size = sz; mb->bkt = bkt; handled = true; break; } } } if (handled == false) { i = debug_alloc_head++; debug_all_allocs[i].blk.data = (char*)data; debug_all_allocs[i].blk.size = sz; debug_all_allocs[i].bkt = bkt; } #endif int64_t afterSize = block->size - (misalignment + sz); if (block->data == bkt->ptr + bkt->head) { bkt->head += (sz + misalignment); } if (afterSize > 0 && nextBlock == nullptr) { struct pk_memblock newBlock; memset(&newBlock, 0, sizeof(struct pk_memblock)); newBlock.data = block->data + misalignment + sz; newBlock.size = afterSize; pk_bucket_insert_block(bkt, &newBlock); } if (prevBlock == nullptr && nextBlock == nullptr) { block->size = misalignment; } else if 
(nextBlock != nullptr) { block->size = misalignment; nextBlock->data -= afterSize; nextBlock->size += afterSize; } else if (prevBlock != nullptr) { prevBlock->size += misalignment; block->data += misalignment + sz; block->size = 0; // if you make it here, afterSize has already been handled } bkt->allocs++; assert(data >= bkt->raw && "allocated data is before bucket data"); assert((char*)data <= bkt->ptr + bkt->size && "allocated data is after bucket data"); pk_bucket_collapse_empty_blocks(bkt); #ifdef PK_MEMORY_DEBUGGER if (!bkt->transient) { int64_t debug_tracked_alloc_size = 0; int64_t debug_bucket_alloc_size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount); for (i = 0; i < debug_alloc_head; ++i) { if (debug_all_allocs[i].bkt != bkt) continue; debug_tracked_alloc_size += debug_all_allocs[i].blk.size; } for (i = 0; i <= bkt->lastEmptyBlockIndex; ++i) { debug_bucket_alloc_size -= bkt->blocks[i].size; } assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!"); } #endif mtx_unlock(&bkt->mtx); return data; } void* pk_new_base(size_t sz, size_t alignment) { struct pk_membucket* bkt = nullptr; for (size_t i = 0; i < pk_bucket_head; ++i) { if (pk_buckets[i].transient == true) { continue; } if (pk_buckets[i].size - pk_buckets[i].head < sz + (alignment - 1)) { continue; } bkt = &pk_buckets[i]; break; } if (bkt == nullptr) { bkt = &pk_buckets[pk_bucket_create_inner(PK_DEFAULT_BUCKET_SIZE, false, "pk_bucket internally created")]; } return pk_new_bkt(sz, alignment, bkt); } void* pk_new(size_t sz, size_t alignment, struct pk_membucket* bkt) { if (bkt != NULL) return pk_new_bkt(sz, alignment, bkt); return pk_new_base(sz, alignment); } void pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt) { #ifdef PK_MEMORY_FORCE_MALLOC return std::free(const_cast(ptr)); #endif size_t i; mtx_lock(&bkt->mtx); assert(bkt->allocs > 0); assert(ptr >= bkt->raw && (char*)ptr < bkt->ptr + bkt->size && "pointer not in memory bucket 
range"); assert(sz > 0 && "attempted to free pointer of size 0"); #ifdef PK_MEMORY_DEBUGGER size_t ii; bool found = bkt->transient; if (found == false) { for (ii = debug_alloc_head; ii > 0; --ii) { i = ii-1; struct pk_dbg_memblock* mb = &debug_all_allocs[i]; if (mb->bkt != bkt) continue; if (mb->blk.size == 0) continue; if ((void*)(mb->blk.data) == ptr) { assert(mb->blk.size == sz && "[pkmem.h] incorrect free size"); mb->blk.size = 0; mb->bkt = NULL; found = true; if (i == (debug_alloc_head - 1)) { debug_alloc_head--; } break; } } } assert(found && "[pkmem.h] double free or invalid ptr"); #endif bkt->allocs--; if (bkt->allocs == 0) { bkt->head = 0; bkt->lastEmptyBlockIndex = 0; bkt->blocks[0].data = bkt->ptr; bkt->blocks[0].size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount); mtx_unlock(&bkt->mtx); return; } char* afterPtr = ((char*)(ptr))+sz; struct pk_memblock* beforeBlk = nullptr; struct pk_memblock* afterBlk = nullptr; for (i = bkt->lastEmptyBlockIndex; i > 0; --i) { if (bkt->blocks[i-1].data + bkt->blocks[i-1].size == ptr) { beforeBlk = &bkt->blocks[i-1]; } if (bkt->blocks[i].data == afterPtr) { afterBlk = &bkt->blocks[i]; break; } if (bkt->blocks[i-1].data < (char*)ptr) { break; } } if (ptr == bkt->ptr && afterBlk == nullptr && bkt->blocks[0].data == afterPtr) { afterBlk = &bkt->blocks[0]; } if (afterBlk != nullptr && afterBlk->data == bkt->ptr + bkt->head) { bkt->head -= sz; if (beforeBlk != nullptr) { bkt->head -= beforeBlk->size; } } if (beforeBlk == nullptr && afterBlk == nullptr) { struct pk_memblock newBlock; memset(&newBlock, 0, sizeof(struct pk_memblock)); newBlock.data = (char*)ptr; newBlock.size = sz; pk_bucket_insert_block(bkt, &newBlock); } else if (beforeBlk != nullptr && afterBlk != nullptr) { beforeBlk->size += sz + afterBlk->size; afterBlk->size = 0; } else if (beforeBlk != nullptr) { beforeBlk->size += sz; } else if (afterBlk != nullptr) { afterBlk->data -= sz; afterBlk->size += sz; } pk_bucket_collapse_empty_blocks(bkt); 
#ifdef PK_MEMORY_DEBUGGER if (!bkt->transient) { int64_t debug_tracked_alloc_size = 0; int64_t debug_bucket_alloc_size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount); for (i = 0; i < debug_alloc_head; ++i) { if (debug_all_allocs[i].bkt != bkt) continue; debug_tracked_alloc_size += debug_all_allocs[i].blk.size; } for (i = 0; i <= bkt->lastEmptyBlockIndex; ++i) { debug_bucket_alloc_size -= bkt->blocks[i].size; } assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!"); } #endif mtx_unlock(&bkt->mtx); } void pk_delete_base(const void* ptr, size_t sz) { struct pk_membucket* bkt = nullptr; for (size_t i = 0; i < pk_bucket_head; ++i) { bkt = &pk_buckets[i]; if (ptr >= bkt->raw && (char*)ptr < bkt->ptr + bkt->size) break; } assert(bkt != nullptr && "failed to determine correct memory bucket"); pk_delete_bkt(ptr, sz, bkt); } void pk_delete(const void* ptr, size_t sz, struct pk_membucket* bkt) { if (bkt != NULL) { pk_delete_bkt(ptr, sz, bkt); return; } pk_delete_base(ptr, sz); return; } #endif /* PK_IMPL_MEM */ #ifndef PK_STR_H #define PK_STR_H #include struct pk_str { char *val; uint32_t length; uint32_t reserved; }; struct pk_cstr { const char *val; uint32_t length; uint32_t reserved; }; struct pk_str cstring_to_pk_str(char *s); struct pk_cstr cstring_to_pk_cstr(const char *s); struct pk_str pk_cstr_to_pk_str(const struct pk_cstr *s); struct pk_cstr pk_str_to_pk_cstr(const struct pk_str *s); int pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs); int pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs); #endif /* PK_STR_H */ #ifdef PK_IMPL_STR #include struct pk_str cstring_to_pk_str(char *s) { struct pk_str ret; ret.val = s; ret.length = (uint32_t)(strlen(s)); ret.reserved = 0; return ret; } struct pk_cstr cstring_to_pk_cstr(const char *s) { struct pk_cstr ret; ret.val = s; ret.length = (uint32_t)(strlen(s)); ret.reserved = 0; return ret; } struct pk_str pk_cstr_to_pk_str(const struct 
pk_cstr *s) { struct pk_str ret; ret.val = (char *)s->val; ret.length = s->length; ret.reserved = s->reserved; return ret; } struct pk_cstr pk_str_to_pk_cstr(const struct pk_str *s) { struct pk_cstr ret; ret.val = (char *)s->val; ret.length = s->length; ret.reserved = s->reserved; return ret; } int pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs) { return strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length)); } int pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs) { return strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length)); } #endif /* PK_IMPL_STR */ #ifndef PK_EV_H #define PK_EV_H #include typedef uint64_t pk_ev_mgr_id_T; typedef uint64_t pk_ev_id_T; typedef uint64_t pk_ev_cb_id_T; // TODO re-think threading // note: pk_ev_init() is NOT thread-safe void pk_ev_init(); // note: pk_ev_teardown() is NOT thread-safe void pk_ev_teardown(); pk_ev_mgr_id_T pk_ev_create_mgr(); void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr); typedef void (pk_ev_cb_fn)(void *user_event_data, void *user_cb_data, void *user_ev_data); pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data); pk_ev_cb_id_T pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data); void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data); void pk_ev_unregister_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_id_T cbid); #endif /* PK_EV_H */ #ifdef PK_IMPL_EV #include #include #include #include #include #include #include #ifndef PK_EV_INIT_MGR_COUNT # define PK_EV_INIT_MGR_COUNT 1 #endif #ifndef PK_EV_INIT_EV_COUNT # define PK_EV_INIT_EV_COUNT 16 #endif #ifndef PK_EV_INIT_CB_COUNT # define PK_EV_INIT_CB_COUNT 8 #endif #ifndef PK_EV_GROW_RATIO # define PK_EV_GROW_RATIO 1.5 #endif struct pk_ev_cb { pk_ev_cb_fn *cb; void *user_cb_data; }; struct pk_ev { struct pk_ev_cb *ev_cbs; void *user_ev_data; atomic_uint_fast64_t left_ev_cbs; atomic_uint_fast64_t right_ev_cbs; }; struct pk_ev_mgr { 
struct pk_ev *ev; atomic_uint_fast64_t n_ev; // reserved length of `pk_ev`s on this struct atomic_uint_fast64_t rn_ev; // on any given `pk_ev`, the number of callbacks reserved atomic_uint_fast64_t rn_cb; }; struct pk_ev_mstr { atomic_uint_fast64_t flg_mgrs; atomic_uint_fast64_t rn_mgrs; struct pk_ev_mgr **mgrs; mtx_t *mtxs; }; struct pk_ev_mstr pk_ev_mstr; void pk_ev_init() { int i; pk_ev_mstr.mgrs = (struct pk_ev_mgr **)malloc(sizeof(void *) * PK_EV_INIT_MGR_COUNT); pk_ev_mstr.mtxs = (mtx_t*)malloc(sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT); memset(pk_ev_mstr.mgrs, 0, sizeof(void *) * PK_EV_INIT_MGR_COUNT); memset(pk_ev_mstr.mtxs, 0, sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT); for (i = 0; i < PK_EV_INIT_MGR_COUNT; ++i) { mtx_init(&pk_ev_mstr.mtxs[i], mtx_plain); } atomic_store(&pk_ev_mstr.flg_mgrs, 0); atomic_store(&pk_ev_mstr.rn_mgrs, PK_EV_INIT_MGR_COUNT); } void pk_ev_teardown() { long unsigned int i; for (i = 0; i < pk_ev_mstr.rn_mgrs; ++i) { if ((atomic_load(&pk_ev_mstr.rn_mgrs) & (1lu << i)) == 0) continue; mtx_lock(&pk_ev_mstr.mtxs[i]); free(pk_ev_mstr.mgrs[i]); pk_ev_mstr.mgrs[i] = NULL; mtx_unlock(&pk_ev_mstr.mtxs[i]); mtx_destroy(&pk_ev_mstr.mtxs[i]); } free(pk_ev_mstr.mgrs); free(pk_ev_mstr.mtxs); pk_ev_mstr.mgrs = NULL; pk_ev_mstr.mtxs = NULL; } size_t pk_ev_inner_calc_sz(uint64_t ev_count, uint64_t cb_count, size_t *sz_ev_list, size_t *sz_ev_cb_list) { // base sizes if (sz_ev_list != nullptr) *sz_ev_list = sizeof(struct pk_ev) * ev_count; if (sz_ev_cb_list != nullptr) *sz_ev_cb_list = sizeof(struct pk_ev_cb) * cb_count; size_t ret = sizeof(struct pk_ev_mgr); if (sz_ev_list != nullptr) ret += *sz_ev_list; if (sz_ev_cb_list != nullptr) ret += *sz_ev_cb_list * ev_count; return ret; } static struct pk_ev_mgr* pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count) { assert(ev_count < 0x100); assert(cb_count < 0x100); uint64_t i; struct pk_ev *ev; size_t sz_ev_list; size_t sz_ev_cb_list; size_t sz = pk_ev_inner_calc_sz(ev_count, cb_count, &sz_ev_list, 
&sz_ev_cb_list); size_t sz_offset; struct pk_ev_mgr *mgr = (struct pk_ev_mgr*)malloc(sz); if (mgr == NULL) goto early_exit; mgr->ev = (struct pk_ev*)(((char *)mgr) + sizeof(struct pk_ev_mgr)); atomic_init(&mgr->rn_ev, ev_count); atomic_init(&mgr->rn_cb, cb_count); atomic_init(&mgr->n_ev, 0); for (i = 0; i < ev_count; ++i) { ev = &mgr->ev[i]; atomic_init(&ev->left_ev_cbs, 0); atomic_init(&ev->right_ev_cbs, 0); sz_offset = sizeof(struct pk_ev_mgr); sz_offset += sz_ev_list; sz_offset += sz_ev_cb_list * i; ev->ev_cbs = (struct pk_ev_cb*)(((char *)mgr) + sz_offset); } early_exit: return mgr; } static void pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr) { uint64_t i; struct pk_ev *ev_old; struct pk_ev *ev; atomic_store(&mgr->n_ev, atomic_load(&old->n_ev)); size_t old_sz_ev_cb_list; for (i = 0; i < old->n_ev; ++i) { ev_old = &old->ev[i]; ev = &mgr->ev[i]; pk_ev_inner_calc_sz(0, atomic_load(&ev_old->right_ev_cbs), nullptr, &old_sz_ev_cb_list); ev->user_ev_data = ev_old->user_ev_data; memcpy(ev->ev_cbs, ev_old->ev_cbs, old_sz_ev_cb_list); atomic_store(&ev->left_ev_cbs, atomic_load(&ev_old->left_ev_cbs)); atomic_store(&ev->right_ev_cbs, atomic_load(&ev_old->right_ev_cbs)); } } pk_ev_mgr_id_T pk_ev_create_mgr() { uint64_t i; pk_ev_mgr_id_T flg; pk_ev_mgr_id_T flg_new; pk_ev_mgr_id_T id; struct pk_ev_mgr *mgr = pk_ev_inner_ev_mgr_create(PK_EV_INIT_EV_COUNT, PK_EV_INIT_CB_COUNT); if (mgr == NULL) return -1; start: flg = atomic_load(&pk_ev_mstr.flg_mgrs); while (1) { flg_new = flg; for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) { if ((flg & (1u << i)) == 0) break; } if (i == atomic_load(&pk_ev_mstr.rn_mgrs)) { goto recreate; } id = i; flg_new |= (1u << i); if (atomic_compare_exchange_strong(&pk_ev_mstr.flg_mgrs, &flg, flg_new)) break; thrd_yield(); } pk_ev_mstr.mgrs[id]= mgr; return id; recreate: // TODO recreate mgr, out of space assert(1 == 0 && "[pkev.h] Out of mgr space."); exit(1); goto start; } void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr) { 
mtx_lock(&pk_ev_mstr.mtxs[evmgr]); free(pk_ev_mstr.mgrs[evmgr]); pk_ev_mstr.mgrs[evmgr] = NULL; mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); } pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data) { assert(evmgr < 64); uint64_t new_size; pk_ev_id_T id; struct pk_ev_mgr *mgr = nullptr; mtx_lock(&pk_ev_mstr.mtxs[evmgr]); if (pk_ev_mstr.mgrs[evmgr]->n_ev == pk_ev_mstr.mgrs[evmgr]->rn_ev) { new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_ev * PK_EV_GROW_RATIO)); if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_ev) { PK_LOG_ERR("[pkev.h] need more room, but failed to grow ev count.\n"); mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); exit(1); } mgr = pk_ev_inner_ev_mgr_create(new_size, pk_ev_mstr.mgrs[evmgr]->rn_cb); pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr); free(pk_ev_mstr.mgrs[evmgr]); pk_ev_mstr.mgrs[evmgr] = mgr; } id = pk_ev_mstr.mgrs[evmgr]->n_ev++; mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data; return id; } pk_ev_cb_id_T pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data) { assert(evmgr < 64); bool found = false; uint64_t new_size, i; struct pk_ev_mgr *mgr = nullptr; pk_ev_cb_id_T cb_index; if (pk_ev_mstr.mgrs[evmgr] == nullptr) { PK_LOGV_ERR("[pkev.h] unknown manager: '%lu'.\n", evmgr); exit(1); } for (i = pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) { if (found == false && pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb != nullptr) { found = true; cb_index = i; continue; } if (found == false) continue; if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) { pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs = i; break; } } if (found == false) { mtx_lock(&pk_ev_mstr.mtxs[evmgr]); if (pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) { new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO)); if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_cb) { 
PK_LOG_ERR("[pkev.h] need more room, but failed to grow cb count.\n"); mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); exit(1); } mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev, new_size); pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr); free(pk_ev_mstr.mgrs[evmgr]); pk_ev_mstr.mgrs[evmgr] = mgr; mgr = nullptr; } cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs++; mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); if (cb_index == pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs) { pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs++; } } pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb; pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data; return cb_index; } void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data) { assert(evmgr < 64); uint8_t i; for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) { if (pk_ev_mstr.mgrs[evmgr] == nullptr) continue; if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) continue; (*pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb)( pk_ev_mstr.mgrs[evmgr]->ev[evid].user_ev_data, pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].user_cb_data, user_emit_data); } } void pk_ev_unregister_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_id_T cbid) { struct pk_ev_mgr *mgr = pk_ev_mstr.mgrs[evmgr]; if (mgr == nullptr) return; if (mgr->ev[evid].left_ev_cbs > cbid) { mgr->ev[evid].left_ev_cbs = cbid; } mgr->ev[evid].ev_cbs[cbid].cb = nullptr; mgr->ev[evid].ev_cbs[cbid].user_cb_data = nullptr; } #endif /* PK_IMPL_EV */ #ifndef PK_PKARR_H #define PK_PKARR_H #include struct pk_arr { uint32_t next; uint32_t reserved; uint32_t stride; uint32_t alignment; struct pk_membucket *bkt; void *data; }; typedef bool(pk_arr_item_compare)(void *user_data, void *item); void pk_arr_clear(struct pk_arr *arr); void pk_arr_reset(struct pk_arr *arr); void pk_arr_reserve(struct pk_arr *arr, uint32_t count); void pk_arr_resize(struct pk_arr *arr, uint32_t count); void pk_arr_move_to_back(struct pk_arr 
	*arr, uint32_t index);
void pk_arr_append(struct pk_arr *arr, void *data);
void pk_arr_remove_at(struct pk_arr *arr, uint32_t index);
void pk_arr_clone(struct pk_arr *lhs, struct pk_arr *rhs);
void pk_arr_swap(struct pk_arr *lhs, struct pk_arr *rhs);
uint32_t pk_arr_find_first_index(struct pk_arr *arr, void *user_data, pk_arr_item_compare *fn);

#if defined(__cplusplus)
/* NOTE(review): every template parameter list in this C++ wrapper
 * (`template <typename T>`, `static_cast<pk_arr*>`, `const_cast<pk_arr_t<T>&>`,
 * `reinterpret_cast<T*>`, `pk_arr_t<T>` qualifiers, ...) appears to have
 * been stripped by extraction — restore against the upstream source. */
/* RAII wrapper over pk_arr with value semantics (copy = deep clone,
 * move = swap-and-null). */
template struct pk_arr_t : public pk_arr {
	pk_arr_t();
	pk_arr_t(struct pk_membucket *bkt);
	pk_arr_t(const pk_arr_t &other);
	pk_arr_t(pk_arr_t &&other);
	pk_arr_t &operator=(const pk_arr_t &other);
	pk_arr_t &operator=(pk_arr_t &&other);
	~pk_arr_t();
	T &operator[](size_t index);
};
/* default ctor: stride/alignment derived from T, no storage yet */
template pk_arr_t::pk_arr_t() {
	this->next = 0;
	this->reserved = 0;
	this->stride = sizeof(T);
	this->alignment = alignof(T);
	this->bkt = NULL;
	this->data = NULL;
}
template pk_arr_t::pk_arr_t(struct pk_membucket *bkt) : pk_arr_t() { this->bkt = bkt; }
template pk_arr_t::pk_arr_t(const pk_arr_t &other) {
	// copy ctor
	pk_arr_clone(static_cast(&const_cast&>(other)), this);
}
template pk_arr_t::pk_arr_t(pk_arr_t &&other) {
	// move ctor
	pk_arr_swap(this, &other);
	other.data = NULL;
}
template pk_arr_t & pk_arr_t::operator=(const pk_arr_t &other) {
	// copy assignment
	if (this->data != NULL) { pk_arr_reset(this); }
	pk_arr_clone(static_cast(&const_cast&>(other)), this);
	return *this;
}
template pk_arr_t & pk_arr_t::operator=(pk_arr_t &&other) {
	// move assignment
	if (this->data != NULL) { pk_arr_reset(this); }
	pk_arr_swap(this, &other);
	other.data = NULL;
	return *this;
}
template pk_arr_t::~pk_arr_t() {
	if (this->data != NULL) pk_delete(this->data, this->stride * this->reserved, this->bkt);
}
/* bounds-checked element access; throws on out-of-range */
template T &pk_arr_t::operator[](size_t index) {
	if(index >= this->next) throw "pk_arr_t::operator[] out of range";
	return reinterpret_cast(this->data)[index];
}
/* typed append helper */
template void pk_arr_append_t(pk_arr_t *arr, const T &item) {
	pk_arr_append(arr, &const_cast(item));
}
#endif
#endif /* PK_PKARR_H */

#ifdef PK_IMPL_ARR
#ifndef PK_ARR_GROW_RATIO
#define PK_ARR_GROW_RATIO 1.5
#endif
#ifndef PK_ARR_INITIAL_COUNT
#define PK_ARR_INITIAL_COUNT 16
#endif

/* Logical clear: keeps the allocation, drops the element count. */
void pk_arr_clear(struct pk_arr *arr) { arr->next = 0; }

/* Full reset: frees the storage and zeroes count + capacity. */
void pk_arr_reset(struct pk_arr *arr) {
	if (arr->data != NULL) pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
	arr->data = NULL;
	arr->next = 0;
	arr->reserved = 0;
}

/* Grow capacity to at least `count` elements (never shrinks).
 * NOTE(review): the copy moves stride*reserved bytes — the whole old
 * buffer, not just the live elements; harmless but more than needed. */
void pk_arr_reserve(struct pk_arr *arr, uint32_t count) {
	if (arr->reserved >= count) return;
	void *new_data = pk_new(arr->stride * count, arr->alignment, arr->bkt);
	if (arr->data != NULL) {
		if (arr->next != 0) {
			memcpy(new_data, arr->data, arr->stride * arr->reserved);
		}
		pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
	}
	arr->reserved = count;
	arr->data = new_data;
}

/* Set the element count to `count`, reserving as needed.
 * New elements (if any) are uninitialized. */
void pk_arr_resize(struct pk_arr *arr, uint32_t count) {
	pk_arr_reserve(arr, count);
	arr->next = count;
}

/* Rotate element `index` to the back, shifting the elements after it
 * forward by one slot. */
void pk_arr_move_to_back(struct pk_arr *arr, uint32_t index) {
	if (arr->reserved == 0) return;
	if (arr->next <= 1) return;
#ifdef PK_ARR_MOVE_IN_PLACE
	uint32_t i, ii;
	/* one-element scratch buffer from the same bucket */
	char *target = (char *)pk_new(arr->stride, arr->alignment, arr->bkt);
	char *buffer = (char *)arr->data;
	// copy bytes to temp buffer
	for (ii = 0, i = arr->stride * index; ii < arr->stride; ++ii, ++i) {
		target[ii] = buffer[i];
	}
	// shift everything forward
	// arr->stride = 8
	// arr->next = 2
	// index = 0
	//
	// for (i = 0; i < 8; ++i) {
	//   b[i] = b[i + 8]
	// }
	// b[00] = b[08]
	// b[01] = b[09]
	// ...
	// b[07] = b[15]
	for (i = arr->stride * index; i < (arr->stride * (arr->next - 1)); ++i) {
		buffer[i] = buffer[i + arr->stride];
	}
	// copy temp buffer back into arr
	// arr->stride = 8
	// arr->next = 2
	// index = 0
	//
	// for (ii = 0, i = 8; ii < 8; ++ii, ++i) {
	//   b[i] = t[ii]
	// }
	// b[08] = t[00]
	// b[09] = t[01]
	// ...
// b[15] = t[07] for (ii = 0, i = arr->stride * (arr->next - 1); ii < arr->stride; ++ii, ++i) { buffer[i] = target[ii]; } pk_delete(target, arr->stride, arr->bkt); #else char *new_data = (char *)pk_new(arr->stride * arr->reserved, arr->alignment, arr->bkt); if (index > 0) { memcpy(new_data, arr->data, arr->stride * index); } memcpy( new_data + (arr->stride * (arr->next - 1)), ((char *)arr->data) + (arr->stride * index), arr->stride); memcpy( new_data + (arr->stride * index), ((char *)arr->data) + (arr->stride * (index + 1)), arr->stride * (arr->next - index - 1)); pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt); arr->data = (void *)new_data; #endif } void pk_arr_append(struct pk_arr *arr, void *data) { if (arr->reserved == arr->next) { uint32_t new_count = PK_MAX(arr->reserved == 0 ? PK_ARR_INITIAL_COUNT : arr->reserved * PK_ARR_GROW_RATIO, arr->reserved + 1); void *new_data = pk_new(arr->stride * new_count, arr->alignment, arr->bkt); if (arr->data != NULL) { memcpy(new_data, arr->data, arr->stride * arr->reserved); pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt); } arr->data = new_data; arr->reserved = new_count; } memcpy(((char *)arr->data) + (arr->stride * arr->next), data, arr->stride); arr->next += 1; return; } void pk_arr_remove_at(struct pk_arr *arr, uint32_t index) { if (arr->reserved == 0) return; if (index == arr->next - 1) { arr->next -=1; return; } #ifdef PK_ARR_MOVE_IN_PLACE uint32_t i; char *buffer = (char *)arr->data; // shift everything forward // arr->stride = 8 // arr->next = 3 // index = 0 // // for (i = 0; i < 16; ++i) { // b[i] = b[i + 8] // } // b[00] = b[08] // b[01] = b[09] // ... 
// b[15] = b[23] for (i = arr->stride * index; i < arr->stride * arr->next; ++i) { buffer[i] = buffer[i + arr->stride]; } #else char *new_data = (char *)pk_new(arr->stride * arr->reserved, arr->alignment, arr->bkt); if (index > 0) { memcpy(new_data, arr->data, arr->stride * index); } memcpy( new_data + (arr->stride * index), ((char *)arr->data) + (arr->stride * (index + 1)), arr->stride * (arr->next - index - 1)); pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt); arr->data = (void *)new_data; #endif arr->next -= 1; } void pk_arr_clone(struct pk_arr *lhs, struct pk_arr *rhs) { size_t sz; *rhs = *lhs; if (lhs->data == NULL) return; sz = lhs->stride * lhs->reserved; rhs->data = pk_new(sz, lhs->alignment, lhs->bkt); memcpy(rhs->data, lhs->data, sz); } void pk_arr_swap(struct pk_arr *lhs, struct pk_arr *rhs) { struct pk_arr tmp = *lhs; *lhs = *rhs; *rhs = tmp; } uint32_t pk_arr_find_first_index(struct pk_arr *arr, void *user_data, pk_arr_item_compare *fn) { uint32_t i; char *char_data = (char *)arr->data; for (i = 0; i < arr->next; ++i) { if (fn(user_data, char_data + (arr->stride * i))) return i; } return -1; } #endif /* PK_IMPL_ARR */ #ifndef PK_PK_STN_H #define PK_PK_STN_H #include #include #include #include #include enum PK_STN_RES { PK_STN_RES_SUCCESS, PK_STN_RES_OVERFLOW, PK_STN_RES_UNDERFLOW, PK_STN_RES_INCONVERTIBLE }; enum PK_STN_RES pk_stn_int64_t(int64_t *i, char const *s, int base); enum PK_STN_RES pk_stn_uint64_t(uint64_t *i, char const *s, int base); enum PK_STN_RES pk_stn_int32_t(int32_t *i, char const *s, int base); enum PK_STN_RES pk_stn_uint32_t(uint32_t *i, char const *s, int base); enum PK_STN_RES pk_stn_int16_t(int16_t *i, char const *s, int base); enum PK_STN_RES pk_stn_uint16_t(uint16_t *i, char const *s, int base); enum PK_STN_RES pk_stn_int8_t(int8_t *i, char const *s, int base); enum PK_STN_RES pk_stn_uint8_t(uint8_t *i, char const *s, int base); enum PK_STN_RES pk_stn_float(float *f, char const *s); enum PK_STN_RES 
pk_stn_double(double *d, char const *s); enum PK_STN_RES pk_stn_float_e(float *f, char const *s, char **pEnd); enum PK_STN_RES pk_stn_double_e(double *d, char const *s, char **pEnd); #if defined(__cplusplus) template enum PK_STN_RES pk_stn(T *n, char const *s, int base = 0) { if constexpr(std::is_same::value) { return pk_stn_int64_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_uint64_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_int32_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_uint32_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_int16_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_uint16_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_int8_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_uint8_t(n, s, base); } if constexpr(std::is_same::value) { return pk_stn_float(n, s); } if constexpr(std::is_same::value) { return pk_stn_double(n, s); } return (PK_STN_RES)-1; } #endif /* defined(__cplusplus) */ #endif /* PK_PK_STN_H */ #ifdef PK_IMPL_STN enum PK_STN_RES pk_stn_int64_t(int64_t *i, char const *s, int base) { char *end; long long l; errno = 0; l = strtoll(s, &end, base); if (errno == ERANGE) { if (l == LLONG_MAX) return PK_STN_RES_OVERFLOW; return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_uint64_t(uint64_t *i, char const *s, int base) { char *end; unsigned long long l; errno = 0; l = strtoull(s, &end, base); if (errno == ERANGE) { if (l == ULLONG_MAX) return PK_STN_RES_OVERFLOW; return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_int32_t(int32_t *i, char const *s, int base) { char *end; long l; errno = 0; l = strtol(s, &end, base); if (errno == ERANGE) { if (l == LONG_MAX) return PK_STN_RES_OVERFLOW; return 
PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_uint32_t(uint32_t *i, char const *s, int base) { char *end; unsigned long l; errno = 0; l = strtoul(s, &end, base); if (errno == ERANGE) { if (l == ULONG_MAX) return PK_STN_RES_OVERFLOW; return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_int16_t(int16_t *i, char const *s, int base) { char *end; long l; errno = 0; l = strtol(s, &end, base); if (errno == ERANGE) { if (l == LONG_MAX) return PK_STN_RES_OVERFLOW; return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_uint16_t(uint16_t *i, char const *s, int base) { char *end; unsigned long l; errno = 0; l = strtoul(s, &end, base); if (errno == ERANGE) { if (l == ULONG_MAX) return PK_STN_RES_OVERFLOW; return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_int8_t(int8_t *i, char const *s, int base) { char *end; long l; errno = 0; l = strtol(s, &end, base); if (errno == ERANGE) { if (l == LONG_MAX) return PK_STN_RES_OVERFLOW; return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_uint8_t(uint8_t *i, char const *s, int base) { char *end; unsigned long l; errno = 0; l = strtoul(s, &end, base); if (errno == ERANGE) { if (l == ULONG_MAX) return PK_STN_RES_OVERFLOW; return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || *end != '\0') { return PK_STN_RES_INCONVERTIBLE; } *i = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_float(float *f, char const *s) { char *end; return pk_stn_float_e(f, s, &end); } enum PK_STN_RES pk_stn_double(double *d, char const *s) { 
char *end; return pk_stn_double_e(d, s, &end); } enum PK_STN_RES pk_stn_float_e(float *f, char const *s, char **pEnd) { float l; errno = 0; l = strtof(s, pEnd); if (errno == ERANGE && l == HUGE_VALF) { return PK_STN_RES_OVERFLOW; } if (errno == ERANGE && l == -HUGE_VALF) { return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || &s == (const char **)pEnd) { return PK_STN_RES_INCONVERTIBLE; } *f = l; return PK_STN_RES_SUCCESS; } enum PK_STN_RES pk_stn_double_e(double *d, char const *s, char **pEnd) { double l; errno = 0; l = strtod(s, pEnd); if (errno == ERANGE && l == HUGE_VAL) { return PK_STN_RES_OVERFLOW; } if (errno == ERANGE && l == -HUGE_VAL) { return PK_STN_RES_UNDERFLOW; } if (*s == '\0' || &s == (const char **)pEnd) { return PK_STN_RES_INCONVERTIBLE; } *d = l; return PK_STN_RES_SUCCESS; } #endif /* PK_IMPL_STN */ #ifndef PK_PKTMR_H #define PK_PKTMR_H #include /* 2024-12-17 JCB * I have read that in more recent Linux kernels, _MONOTONIC and _REALTIME * do not require syscalls, while all of the other calls can. * In testing on my personal machine, this seems to hold true. Using * CLOCK_PROCESS_CPUTIME_ID consistently elapsed thousands of nanoseconds, * even with no work between sequential _start() and _stop() calls. * Meanwhile, the same test with _MONOTONIC elapsed only tens of nanoseconds. 
*/ /* struct pk_tmr */ struct pk_tmr { struct timespec b; // begin struct timespec e; // end }; #ifndef PK_TMR_CLOCK #define PK_TMR_CLOCK CLOCK_MONOTONIC #endif #define pk_tmr_start(tmr) { clock_gettime(PK_TMR_CLOCK, &tmr.b); } #define pk_tmr_stop(tmr) { clock_gettime(PK_TMR_CLOCK, &tmr.e); } #define pk_tmr_duration_u64_nano(tmr) ((((unsigned long long int)tmr.e.tv_sec * 1000000000llu) + tmr.e.tv_nsec) - (((unsigned long long int)tmr.b.tv_sec * 1000000000llu) + (unsigned long long int)tmr.b.tv_nsec)) #define pk_tmr_duration_dbl_nano(tmr) ((1e+9 * tmr.e.tv_sec + tmr.e.tv_nsec) - (1e+9 * tmr.b.tv_sec + tmr.b.tv_nsec)) #define pk_tmr_duration_dbl_micro(tmr) ((1e+6 * tmr.e.tv_sec + 1e-3 * tmr.e.tv_nsec) - (1e+6 * tmr.b.tv_sec + 1e-3 * tmr.b.tv_nsec)) #define pk_tmr_duration_dbl_mili(tmr) ((1e+3 * tmr.e.tv_sec + 1e-6 * tmr.e.tv_nsec) - (1e+3 * tmr.b.tv_sec + 1e-6 * tmr.b.tv_nsec)) #define pk_tmr_duration_dbl_scnd(tmr) ((tmr.e.tv_sec + 1e-9 * tmr.e.tv_nsec) - (tmr.b.tv_sec + 1e-9 * tmr.b.tv_nsec)) #endif /* PK_PKTMR_H */ #ifndef PK_UUID_H #define PK_UUID_H #include "stddef.h" #include struct pk_uuid { alignas(max_align_t) unsigned char uuid[16]; }; const struct pk_uuid pk_uuid_zed = { .uuid = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }; const struct pk_uuid pk_uuid_max = { .uuid = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } }; #define pk_uuid_printf_format PK_Q(%.2x%.2x%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x%.2x%.2x%.2x%.2x) #define pk_uuid_printf_var(id) id.uuid[0], id.uuid[1], id.uuid[2], id.uuid[3], id.uuid[4], id.uuid[5], id.uuid[6], id.uuid[7], id.uuid[8], id.uuid[9], id.uuid[10], id.uuid[11], id.uuid[12], id.uuid[13], id.uuid[14], id.uuid[15] void pk_uuid_init(time_t srand_seed); void pk_uuid_teardown(); struct pk_uuid pk_uuid_new_v7(); bool pk_uuid_equals(struct pk_uuid lhs, struct pk_uuid rhs); #if defined(__cplusplus) #include #include 
std::ostream& operator<<(std::ostream &o, const struct pk_uuid& uuid); std::istream& operator>>(std::istream &i, struct pk_uuid& uuid); bool operator==(const pk_uuid &lhs, const pk_uuid &rhs); bool operator!=(const pk_uuid &lhs, const pk_uuid &rhs); #endif #endif /* PK_UUID_H */ #ifdef PK_IMPL_UUID #include #include #include // TODO JCB - 2025-03-19 // This should have platform-specific defines #ifndef PK_UUID_CLOCK #ifdef CLOCK_TAI #define PK_UUID_CLOCK CLOCK_TAI #else #define PK_UUID_CLOCK CLOCK_REALTIME #endif #endif void pk_uuid_init(time_t srand_seed) { // TODO 2025-03-19 - JCB // pk.h should NOT be setting srand. // Replace dependency on rand/srand with a sufficient rand() implementation. // I would prefer if generating a UUID did not advance a global random. // Consider creating a pkrand.h to resolve this. srand(srand_seed); } void pk_uuid_teardown() { } struct pk_uuid pk_uuid_new_v7() { const int n = 1; uint32_t r; // https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-7 struct pk_uuid ret; struct timespec t; clock_gettime(PK_UUID_CLOCK, &t); uint32_t sec = (uint32_t)t.tv_sec; uint32_t nsec = (uint32_t)t.tv_nsec; // [000-047] (6 bytes) big-endian unix epoch // TODO test this on a big-endian machine, I don't think this is correct. // This `if` determines if we are big or little endian. // A return value of 1 says we are little endian, so swap the bits. 
if (*(char *)&n == 1) { ret.uuid[0] = (uint8_t)((sec & 0xFF000000) >> 24); ret.uuid[1] = (uint8_t)((sec & 0x00FF0000) >> 16); ret.uuid[2] = (uint8_t)((sec & 0x0000FF00) >> 8); ret.uuid[3] = (uint8_t)((sec & 0x000000FF) >> 0); ret.uuid[4] = (uint8_t)((nsec & 0x0000FF00) >> 8); ret.uuid[5] = (uint8_t)((nsec & 0x000000FF) >> 0); } else { ret.uuid[0] = (uint8_t)((sec & 0xFF000000) >> 0); ret.uuid[1] = (uint8_t)((sec & 0x00FF0000) >> 8); ret.uuid[2] = (uint8_t)((sec & 0x0000FF00) >> 16); ret.uuid[3] = (uint8_t)((sec & 0x000000FF) >> 24); ret.uuid[4] = (uint8_t)((nsec & 0xFF000000) >> 0); ret.uuid[5] = (uint8_t)((nsec & 0x00FF0000) >> 8); } // [052-127] random r = (uint32_t)rand(); if (*(char *)&n == 1) { ret.uuid[8] = (uint8_t)((r & 0xFF000000) >> 24); ret.uuid[9] = (uint8_t)((r & 0x00FF0000) >> 16); ret.uuid[10] = (uint8_t)((r & 0x0000FF00) >> 8); ret.uuid[11] = (uint8_t)((r & 0x000000FF) >> 0); } else { ret.uuid[8] = (uint8_t)((r & 0xFF000000) >> 0); ret.uuid[9] = (uint8_t)((r & 0x00FF0000) >> 8); ret.uuid[10] = (uint8_t)((r & 0x0000FF00) >> 16); ret.uuid[11] = (uint8_t)((r & 0x000000FF) >> 24); } r = rand(); if (*(char *)&n == 1) { ret.uuid[12] = (uint8_t)((r & 0xFF000000) >> 24); ret.uuid[13] = (uint8_t)((r & 0x00FF0000) >> 16); ret.uuid[14] = (uint8_t)((r & 0x0000FF00) >> 8); ret.uuid[15] = (uint8_t)((r & 0x000000FF) >> 0); } else { ret.uuid[12] = (uint8_t)((r & 0xFF000000) >> 0); ret.uuid[13] = (uint8_t)((r & 0x00FF0000) >> 8); ret.uuid[14] = (uint8_t)((r & 0x0000FF00) >> 16); ret.uuid[15] = (uint8_t)((r & 0x000000FF) >> 24); } ret.uuid[6] = ret.uuid[9] ^ ret.uuid[12]; ret.uuid[7] = ret.uuid[10] ^ ret.uuid[15]; // [048-051] v7 nibble // version must be 0x7_ // 0x70 is 0b01110000 // 0x7F is 0b01111111 ret.uuid[6] |= 0x70; ret.uuid[6] &= 0x7F; // [064-065] 2-bit variant field // variant must be 0b10 // 0x80 is 0b10000000 // 0xBF is 0b10111111 ret.uuid[8] |= 0x80; ret.uuid[8] &= 0xBF; return ret; } bool pk_uuid_equals(struct pk_uuid lhs, struct pk_uuid rhs) { int i; 
  /* Compare all 16 bytes; early-out on the first mismatch. */
  for (i = 0; i < 16; ++i) {
    if (lhs.uuid[i] != rhs.uuid[i])
      return false;
  }
  return true;
}

#if defined(__cplusplus)
/* Stream the UUID in canonical 8-4-4-4-12 lowercase hex.
 * Saves and restores the stream's format flags and fill character. */
std::ostream& operator<<(std::ostream &o, const struct pk_uuid& uuid)
{
  int i;
  std::ios_base::fmtflags orig_flags = o.flags();
  auto fill = o.fill();
  o << std::hex;
  /* Bytes are widened to uint16_t so they print as numbers, not chars. */
  for (i = 0; i < 4; ++i) {
    o << std::setw(2) << std::setfill('0');
    o << (uint16_t)uuid.uuid[i];
  }
  o << "-";
  for (i = 4; i < 6; ++i) {
    o << std::setw(2) << std::setfill('0');
    o << (uint16_t)uuid.uuid[i];
  }
  o << "-";
  for (i = 6; i < 8; ++i) {
    o << std::setw(2) << std::setfill('0');
    o << (uint16_t)uuid.uuid[i];
  }
  o << "-";
  for (i = 8; i < 10; ++i) {
    o << std::setw(2) << std::setfill('0');
    o << (uint16_t)uuid.uuid[i];
  }
  o << "-";
  for (i = 10; i < 16; ++i) {
    o << std::setw(2) << std::setfill('0');
    o << (uint16_t)uuid.uuid[i];
  }
  o.fill(fill);
  o.flags(orig_flags);
  return o;
}

/* Parse the canonical 36-character 8-4-4-4-12 form back into a pk_uuid.
 * Consumes exactly 20 "groups" (16 hex byte pairs + 4 dashes). On any
 * malformed input the stream is rewound past everything consumed so far and
 * the output UUID is zeroed. */
std::istream& operator>>(std::istream &i, struct pk_uuid& uuid)
{
  char c[3];           // two hex digits + NUL, fed to pk_stn_uint8_t
  char k = 0;          // group index, 0..19
  char offset = 0;     // number of dashes consumed so far
  c[2] = '\0';
  for (k = 0; k < 20; ++k) {
    /* Groups 4, 7, 10, 13 are the dash separators. */
    if (k == 4 || k == 7 || k == 10 || k == 13) {
      offset += 1;
      c[0] = i.peek();
      if (c[0] != '-') {
        goto err_out;
      }
      i.get(); // burn
      continue;
    }
    i.get(c[0]);
    i.get(c[1]);
    /* Convert the two-digit hex pair into byte (k - offset). */
    if (pk_stn_uint8_t(&uuid.uuid[k - offset], c, 16) != PK_STN_RES_SUCCESS) {
      goto err_out;
    }
  }
  return i;
err_out:
  /* Rewind: each completed group cost 2 chars, plus one per dash.
   * NOTE(review): assumes the stream is seekable — TODO confirm for
   * non-seekable sources. */
  i.seekg(-(((k + 1) * 2) + offset), std::ios_base::cur);
  uuid = pk_uuid_zed;
  return i;
}

bool operator==(const pk_uuid &lhs, const pk_uuid &rhs)
{
  return pk_uuid_equals(lhs, rhs);
}

bool operator!=(const pk_uuid &lhs, const pk_uuid &rhs)
{
  return !pk_uuid_equals(lhs, rhs);
}
#endif
#endif

#ifndef PK_PKBKTARR_H
#define PK_PKBKTARR_H

/* Limits of the 24-bit bucket / 8-bit item handle encoding below. */
#define PK_BKT_ARR_HANDLE_B_MAX 0xFFFFFF
#define PK_BKT_ARR_HANDLE_I_MAX 64

/* Handle into a pk_bkt_arr: bucket index (b) + item index within it (i). */
struct pk_bkt_arr_handle {
  unsigned int b : 24;
  unsigned int i : 8;
};

/* Bucketed growable array: fixed-size item slabs ("buckets") plus a 64-bit
 * occupancy bitmask per bucket. */
struct pk_bkt_arr {
  struct pk_membucket *bkt_buckets;   // pkmem bucket for bookkeeping arrays
  struct pk_membucket *bkt_data;      // pkmem bucket for item slabs
  unsigned long long *idx_unused;     // per-bucket free-slot bitmask (1 = free)
  void **bucketed_data;               // per-bucket slab pointers
  struct pk_bkt_arr_handle head_l;    // lowest possibly-free slot
  struct pk_bkt_arr_handle head_r;    // first never-used slot
  struct pk_bkt_arr_handle limits;    // max bucket count / items per bucket
unsigned int reserved_buckets; unsigned long stride; unsigned long alignment; }; void pk_bkt_arr_init(struct pk_bkt_arr *bkt_arr, unsigned long stride, unsigned long alignment, struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets, struct pk_membucket *bkt_data); void pk_bkt_arr_teardown(struct pk_bkt_arr *bkt_arr); struct pk_bkt_arr_handle pk_bkt_arr_new_handle(struct pk_bkt_arr *bkt_arr); void pk_bkt_arr_free_handle(struct pk_bkt_arr *bkt_arr, struct pk_bkt_arr_handle handle); int pk_bkt_arr_handle_compare(struct pk_bkt_arr_handle lhs, struct pk_bkt_arr_handle rhs); struct pk_bkt_arr_handle pk_bkt_arr_handle_increment(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h); struct pk_bkt_arr_handle pk_bkt_arr_handle_decrement(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h); #if defined (__cplusplus) #include template struct pk_bkt_arr_t : public pk_bkt_arr { pk_bkt_arr_t(); pk_bkt_arr_t(struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets = nullptr, struct pk_membucket *bkt_data = nullptr); ~pk_bkt_arr_t(); T &operator[](struct pk_bkt_arr_handle); }; template pk_bkt_arr_t::pk_bkt_arr_t() { pk_bkt_arr_init(this, sizeof(T), alignof(T), {PK_BKT_ARR_HANDLE_B_MAX, PK_BKT_ARR_HANDLE_I_MAX}, nullptr, nullptr); } template pk_bkt_arr_t::pk_bkt_arr_t(struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets, struct pk_membucket *bkt_data) { pk_bkt_arr_init(this, sizeof(T), alignof(T), limits, bkt_buckets, bkt_data); } template pk_bkt_arr_t::~pk_bkt_arr_t() { pk_bkt_arr_teardown(this); } template T &pk_bkt_arr_t::operator[](struct pk_bkt_arr_handle handle) { assert(this->idx_unused != nullptr); assert(this->bucketed_data != nullptr); assert(handle.b <= this->limits.b); assert(handle.i <= this->limits.i); assert(handle.b != this->head_r.b || handle.i < this->head_r.i); T** two_star_programmer = reinterpret_cast(this->bucketed_data); return two_star_programmer[handle.b][handle.i]; } #endif #endif /* PK_PKBKTARR_H */ #ifdef PK_IMPL_BKTARR 
#include #include void pk_bkt_arr_init(struct pk_bkt_arr *bkt_arr, unsigned long stride, unsigned long alignment, struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets, struct pk_membucket *bkt_data) { assert(limits.b <= PK_BKT_ARR_HANDLE_B_MAX); assert(limits.i <= PK_BKT_ARR_HANDLE_I_MAX); assert(bkt_arr != nullptr); memset(bkt_arr, 0, sizeof(struct pk_bkt_arr)); bkt_arr->bkt_buckets = bkt_buckets; bkt_arr->bkt_data = bkt_data; bkt_arr->head_l.b = 0ul; bkt_arr->head_l.i = 0ul; bkt_arr->head_r.b = 0ul; bkt_arr->head_r.i = 0ul; bkt_arr->limits = limits; bkt_arr->reserved_buckets = 1; bkt_arr->stride = stride; bkt_arr->alignment = alignment; bkt_arr->idx_unused = (unsigned long long *)pk_new(sizeof(unsigned long long), alignof(unsigned long long), bkt_buckets); bkt_arr->idx_unused[0] = 0xFFFFFFFFFFFFFFFF; bkt_arr->bucketed_data = (void **)pk_new(sizeof(void *), alignof(void *), bkt_buckets); bkt_arr->bucketed_data[0] = pk_new(stride * limits.i, alignment, bkt_data); } void pk_bkt_arr_teardown(struct pk_bkt_arr *bkt_arr) { int b; size_t sz = bkt_arr->limits.i * bkt_arr->stride; if (bkt_arr->idx_unused == nullptr && bkt_arr->bucketed_data == nullptr) return; for (b = bkt_arr->reserved_buckets - 1; b > -1; --b) { pk_delete(bkt_arr->bucketed_data[b], sz, bkt_arr->bkt_data); } pk_delete((void *)bkt_arr->idx_unused, sizeof(unsigned long long) * (bkt_arr->reserved_buckets), bkt_arr->bkt_buckets); pk_delete((void *)bkt_arr->bucketed_data, sizeof(void *) * (bkt_arr->reserved_buckets), bkt_arr->bkt_buckets); memset(bkt_arr, 0, sizeof(struct pk_bkt_arr)); } struct pk_bkt_arr_handle pk_bkt_arr_new_handle(struct pk_bkt_arr *bkt_arr) { struct pk_bkt_arr_handle ret; unsigned int b, i, ii; assert(bkt_arr != nullptr); // if we have an existing open slot if (pk_bkt_arr_handle_compare(bkt_arr->head_l, bkt_arr->head_r) != 0) { ret = bkt_arr->head_l; for (b = bkt_arr->head_l.b; b < bkt_arr->reserved_buckets; ++b) { if (bkt_arr->idx_unused[b] == 0ull) continue; // I feel like 
you could do a binary search here, but for 64 elements is it worth it?
      /* Scan window within bucket b: start just past head_l in its own
       * bucket (head_l itself was taken as ret above), stop at head_r in its
       * bucket, otherwise at the full bucket width. */
      i = bkt_arr->head_l.b == b ? bkt_arr->head_l.i + 1 : 0;
      ii = bkt_arr->head_r.b == b ? bkt_arr->head_r.i : PK_MIN(64, bkt_arr->limits.i);
      for (; i < ii; ++i) {
        /* Bit set means the slot is free; move head_l there and claim ret. */
        if (bkt_arr->idx_unused[b] & (1ull << i)) {
          bkt_arr->head_l.b = b;
          bkt_arr->head_l.i = i;
          goto done;
        }
      }
    }
    /* No other free slot found: head_l catches up with the frontier. */
    bkt_arr->head_l = bkt_arr->head_r;
    goto done;
  }
  /* Exhaustion check: handle space saturated (increment is a no-op), all
   * buckets reserved, and the frontier bucket is completely occupied. */
  if (pk_bkt_arr_handle_compare(pk_bkt_arr_handle_increment(bkt_arr, bkt_arr->head_l), bkt_arr->head_l) == 0 && bkt_arr->reserved_buckets == bkt_arr->limits.b && bkt_arr->idx_unused[bkt_arr->head_r.b] == 0) {
    PK_LOGV_ERR("[pk_bkt_arr_new_handle] Exceeded bucket limits!: b:%u i:%u\n", bkt_arr->limits.b, bkt_arr->limits.i);
    exit(1);
  }
  /* Frontier crossed into an unreserved bucket: grow bookkeeping arrays by
   * one entry, copy the old contents, and allocate a fresh all-free slab. */
  if (bkt_arr->head_r.b == bkt_arr->reserved_buckets && bkt_arr->head_r.i == 0) {
    bkt_arr->reserved_buckets += 1;
    unsigned long long *new_idx_unused = (unsigned long long *)pk_new(sizeof(unsigned long long) * bkt_arr->reserved_buckets, alignof(unsigned long long), bkt_arr->bkt_buckets);
    void **new_data_ptrs = (void **)pk_new(sizeof(void *) * bkt_arr->reserved_buckets, alignof(void *), bkt_arr->bkt_buckets);
    for (b = 0; b < bkt_arr->reserved_buckets - 1; ++b) {
      new_idx_unused[b] = bkt_arr->idx_unused[b];
      new_data_ptrs[b] = bkt_arr->bucketed_data[b];
    }
    new_idx_unused[bkt_arr->reserved_buckets - 1] = 0xFFFFFFFFFFFFFFFF;
    new_data_ptrs[bkt_arr->reserved_buckets - 1] = pk_new(bkt_arr->stride * bkt_arr->limits.i, bkt_arr->alignment, bkt_arr->bkt_data);
    /* Old arrays had reserved_buckets - 1 entries. */
    pk_delete((void *)bkt_arr->idx_unused, sizeof(unsigned long long) * (bkt_arr->reserved_buckets - 1), bkt_arr->bkt_buckets);
    pk_delete((void *)bkt_arr->bucketed_data, sizeof(void *) * (bkt_arr->reserved_buckets - 1), bkt_arr->bkt_buckets);
    bkt_arr->idx_unused = new_idx_unused;
    bkt_arr->bucketed_data = new_data_ptrs;
  }
  /* Claim the frontier slot and advance both heads past it. */
  ret = bkt_arr->head_r;
  bkt_arr->head_r = pk_bkt_arr_handle_increment(bkt_arr, bkt_arr->head_r);
  bkt_arr->head_l = pk_bkt_arr_handle_increment(bkt_arr, bkt_arr->head_l);
done:
  /* Clear the free bit for the claimed slot. */
  bkt_arr->idx_unused[ret.b] &=
~(1ull << ret.i); return ret; } void pk_bkt_arr_free_handle(struct pk_bkt_arr *bkt_arr, struct pk_bkt_arr_handle handle) { assert(bkt_arr != nullptr); bkt_arr->idx_unused[handle.b] |= (1ull << handle.i); if (handle.b < bkt_arr->head_l.b || (handle.b == bkt_arr->head_l.b && handle.i < bkt_arr->head_l.i)) { bkt_arr->head_l = handle; return; } } int pk_bkt_arr_handle_compare(struct pk_bkt_arr_handle lhs, struct pk_bkt_arr_handle rhs) { if (lhs.b == rhs.b && lhs.i == rhs.i) return 0; if (lhs.b == rhs.b) return (int)rhs.i - (int)lhs.i; return (int)rhs.b - (int)lhs.b; } struct pk_bkt_arr_handle pk_bkt_arr_handle_increment(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h) { h.i += 1; if (arr->limits.i == h.i) { if (h.b + 1 < arr->limits.b) { h.b += 1; h.i = 0; } else { h.i -= 1; } } return h; } struct pk_bkt_arr_handle pk_bkt_arr_handle_decrement(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h) { if (h.i == 0) { if (h.b != 0) { h.b -= 1; h.i = arr->limits.i; } else { return h; } } h.i -= 1; return h; } #endif /* PK_IMPL_BKTARR */ #endif /* PK_SINGLE_HEADER_FILE_H */