#ifndef PK_SINGLE_HEADER_FILE_H
#define PK_SINGLE_HEADER_FILE_H
/*******************************************************************************
 * PK Single-Header-Library V0.9.8
 *
 * Author: Jonathan Bradley
 * Copyright: © 2024-2025 Jonathan Bradley
 * Description:
 *
 * A collection of useful programming tools, available for C and C++ as a
 * single-header file. To enable, in ONE single C or C++ file, define
 * PK_IMPL_ALL before including pk.h.
 *
 * Example:
 *
 * pk.h.include.c
 * ``` c
 * #define PK_IMPL_ALL
 * #include "pk.h"
 * ```
 *
 * It is also possible to enable modules ad-hoc by defining each IMPL
 * individually:
 *
 * pk.h.include.c
 * ``` c
 * # define PK_IMPL_MEM_TYPES
 * # define PK_IMPL_MEM
 * # define PK_IMPL_STR
 * # define PK_IMPL_EV
 * # define PK_IMPL_ARR
 * # define PK_IMPL_STN
 * #include "pk.h"
 * ```
 *
 ********************************************************************************
 * pkmacros.h:
 *
 * Provides a set of useful macros for a variety of uses.
 *
 * The PK_LOG* macros provide simple logging utilities. These can be overridden
 * by providing your own implementation of each and defining PK_LOG_OVERRIDE
 * before including pk.h. Note that each of these is a no-op if NDEBUG is
 * defined.
 *
 * The TypeSafeInt_H and TypeSafeInt_B macros provide a way to define
 * type-specific integers, implemented via enums.
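 *
 * A minimal usage sketch (EntityId is an illustrative name, not part of the
 * library):
 *
 * ``` c++
 * TypeSafeInt_H(EntityId, uint32_t, 0xFFFFFFFF) // declarations, any TU
 * TypeSafeInt_B(EntityId)                       // definitions, exactly one TU
 *
 * EntityId a{1};
 * a += EntityId{2};
 * uint32_t raw = static_cast<uint32_t>(a); // raw == 3
 * ```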
 *
 ********************************************************************************
 * pktmpl.h: only contains c++ templates, no IMPL.
 *
 * Provides template structs for trampolines, allowing c-style callbacks with
 * capturing lambdas.
 *
 * Examples:
 * ```c++
 * int some_counter = 0;
 * using IterCbWrapper = pk_tmpln_1<void, int *, void *>;
 * IterCbWrapper cb_wrapper{};
 * cb_wrapper.func = [&some_counter](int *lhs)
 * {
 *   (void)lhs;
 *   some_counter += 1;
 *   return;
 * };
 * pk_bkt_arr_iterate(&bkt_arr, &IterCbWrapper::invoke, &cb_wrapper);
 * assert(some_counter == 1);
 * ```
 *
 ********************************************************************************
 * pkmem-types.h: def PK_IMPL_MEM_TYPES before including pk.h to enable ad-hoc.
 *
 * Provides the types needed by pkmem, as well as a generic pk_handle featuring
 * a bucket+item indexing system.
 *
 ********************************************************************************
 * pkmem.h: def PK_IMPL_MEM before including pk.h to enable ad-hoc.
 *
 * A bucketed memory manager. Allows for the creation of ad-hoc buckets.
 *
 * Note: Each created pk_membucket MUST be released with
 * pk_mem_bucket_destroy(bkt). Memory buckets are client managed.
 *
 * Thread safety: the "pk_new" and "pk_delete" methods *are* thread-safe, but
 * thread-safety is implemented per-bucket via a single mutex with long-running
 * lock times. PRs for a more performant thread-safe strategy are welcome,
 * depending on complexity and benchmarks.
 *
 * The following definitions (shown with defaults) can be overridden:
 *   PK_MEM_DEFAULT_BUCKET_SIZE 256MB (client-convenience only)
 *   PK_MINIMUM_ALIGNMENT       1
 *
 * For debugging purposes, define the following:
 *   PK_MEMORY_DEBUGGER : enables a tracking system for all allocs and frees to
 *     ensure bucket validity and consistency.
 *   PK_MEMORY_FORCE_MALLOC : completely disables pkmem and its debugging
 *     features in favor of directly using malloc and free. Useful for
 *     out-of-bounds checking.
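 *
 * A minimal usage sketch (names as declared in this module; `values` is
 * illustrative):
 *
 * ``` c
 * struct pk_membucket *bkt = pk_mem_bucket_create(
 *     "scratch", PK_MEM_DEFAULT_BUCKET_SIZE, PK_MEMBUCKET_FLAG_NONE);
 * int *values = (int *)pk_new(sizeof(int) * 64, alignof(int), bkt);
 * // ... use values ...
 * pk_delete(values, sizeof(int) * 64, bkt);
 * pk_mem_bucket_destroy(bkt);
 * ```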
 *
 ********************************************************************************
 * pkstr.h: def PK_IMPL_STR before including pk.h to enable ad-hoc.
 *
 * Provides a simple string structure, allowing the user to track the string
 * length and reserved buffer length. Limits the max string length to
 * UINT32_MAX, which is roughly 4GB.
 *
 * Tip: set reserved to 0 for compile-time strings as well as for strings
 * alloc'd in a larger buffer (such as bulk-loaded data).
 *
 ********************************************************************************
 * pkev.h: def PK_IMPL_EV before including pk.h to enable ad-hoc.
 *
 * Provides a simple event callback system. While the _init and _teardown
 * functions are NOT thread-safe, the _register and _emit functions are.
 * Note: allocates through pkmem by default (see PK_EV_MEM_ALLOC and
 * PK_EV_MEM_FREE in the implementation).
 *
 * Each mgr is stored contiguously with its data. Consider the following layout:
 * [[mgr][ev 0][ev 1][..][ev N][ev 0 cb array][ev 1 cb array][..][ev N cb array]]
 *
 * The following definitions (shown with defaults) can be overridden:
 *   PK_EV_INIT_MGR_COUNT 1
 *   PK_EV_INIT_EV_COUNT  16
 *   PK_EV_INIT_CB_COUNT  8
 *   PK_EV_GROW_RATIO     1.5
 *
 * The number of evs and cbs (per ev) is stored as a uint8_t, so a hard limit
 * of 255 is to be observed for each. The number of mgrs is stored as a
 * uint64_t.
 *
 * Note that PK_EV_GROW_RATIO is used in two scenarios:
 *   1. When registering an ev on a full mgr.
 *   2. When registering a cb on a full ev.
 * The grow ratio is applied to the ev count and cb count in their respective
 * scenarios. This causes a new allocation for the entire mgr. The existing
 * mgr and its evs and cbs are copied to the new, larger buffer space.
 * Explicitly, the number of mgrs does not grow dynamically. Use
 * PK_EV_INIT_MGR_COUNT to control the number of mgrs.
 *
 * Note that increasing PK_EV_INIT_MGR_COUNT isn't generally recommended, but
 * you may consider doing so if you have specific size or contiguity
 * requirements. For example, you could -DPK_EV_INIT_EV_COUNT=1 to reduce the
 * memory footprint of each event/mgr, and simply create a new mgr for each
 * needed event. Be aware that in this provided scenario a given mgr will still
 * grow if a second ev is registered.
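 *
 * A minimal end-to-end sketch (my_cb, bkt, and the NULL user-data pointers
 * are illustrative; signatures as declared in this module):
 *
 * ``` c
 * void my_cb(void *ev_data, void *cb_data, void *emit_data) { ... }
 *
 * pk_ev_init(bkt);
 * pk_ev_mgr_id_T mgr = pk_ev_create_mgr();
 * pk_ev_id_T ev = pk_ev_register_ev(mgr, NULL);
 * pk_ev_cb_id_T cb = pk_ev_register_cb(mgr, ev, &my_cb, NULL);
 * pk_ev_emit(mgr, ev, NULL); // invokes my_cb(ev_data, cb_data, emit_data)
 * pk_ev_unregister_cb(mgr, ev, cb);
 * pk_ev_destroy_mgr(mgr);
 * pk_ev_teardown();
 * ```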
 *
 ********************************************************************************
 * pkarr.h: def PK_IMPL_ARR before including pk.h to enable ad-hoc.
 *
 * Provides a structure for managing contiguous lists.
 *
 * The following definitions (shown with defaults) can be overridden:
 *   PK_ARR_INITIAL_COUNT  16
 *   PK_ARR_GROW_RATIO     1.5
 *   PK_ARR_MOVE_IN_PLACE  (not defined)
 *
 * The macro `PK_ARR_MOVE_IN_PLACE` ensures that, when possible, the pointer
 * value of `arr->data` is preserved.
 * It is used in the following methods:
 *   `pk_arr_move_to_back`
 *   `pk_arr_remove_at`
 * This has two additional benefits:
 *   1. Minimizing the number and `sz` of calls to `pk_new`.
 *   2. Ensuring `data[0]` to `data[(N - 1) * stride]` is not copied
 *      extraneously to a new buffer.
 * The speed of this will vary depending on usage, platform, and compiler.
 *
 * Initialize the `stride`, `alignment`, and `bkt` (optional) members
 * *before* calling any `pk_arr_*` methods.
 * Alternatively, if using c++, use the template ctor.
 *
 * Examples:
 * ``` c
 * struct pk_arr arr = {0};
 * arr.stride = sizeof(obj);     // required
 * arr.alignment = alignof(obj); // required
 * arr.bkt = bkt;                // optional
 * pk_arr_reserve(&arr, 10);     // optional
 * pk_arr_append(&arr, &obj);
 * ```
 * ``` c++
 * pk_arr_t<obj> arr(bkt);
 * pk_arr_reserve(&arr, 10); // optional
 * pk_arr_append(&arr, &obj);
 * ```
 * ``` c
 * struct pk_arr arr = {0};
 * arr.stride = sizeof(obj);     // required
 * arr.alignment = alignof(obj); // required
 * arr.bkt = bkt;                // optional
 * pk_arr_resize(&arr, 10);
 * obj *d = (obj *)arr.data;
 * d[0] = ...;
 * ```
 * ``` c++
 * pk_arr_t<obj> arr;
 * pk_arr_resize(&arr, 10);
 * arr[0] = {};
 * ```
 *
 ********************************************************************************
 * pkstn.h: def PK_IMPL_STN before including pk.h to enable ad-hoc.
 *
 * Provides a thorough interface for interacting with the `stoi` family of
 * procedures.
 *
 ********************************************************************************
 * pktmr.h: No IMPL define, all methods are macros.
 *
 * Offers a set of `pk_tmr*` macros for elapsed-time checking.
 *
 * The following definitions (shown with defaults) can be overridden:
 *   PK_TMR_CLOCK CLOCK_MONOTONIC
 *
 * If your needs require you to use more than one clock, I recommend calling
 * `clock_gettime` manually instead of calling `pk_tmr_start`/`pk_tmr_stop`.
 * `pk_tmr.b` is the start time.
 * `pk_tmr.e` is the end time.
 * You could then call the `pk_tmr_duration...` convenience macros as needed.
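 *
 * A sketch of the intended flow (assuming, per the notes above, that the
 * timer struct exposes `timespec` members `b` and `e` and that the macros
 * take a pointer; the duration math is written out manually rather than
 * guessing the `pk_tmr_duration...` macro names):
 *
 * ``` c
 * struct pk_tmr t;
 * pk_tmr_start(&t);
 * // ... work ...
 * pk_tmr_stop(&t);
 * double secs = (double)(t.e.tv_sec - t.b.tv_sec)
 *             + (double)(t.e.tv_nsec - t.b.tv_nsec) / 1e9;
 * ```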
 *
 ********************************************************************************
 * pkuuid.h: define PK_IMPL_UUID before including pk.h to enable ad-hoc.
 *
 * Provides a 16-byte unsigned char array struct for uuids.
 *
 * The following definitions (shown with defaults) can be overridden:
 *   PK_UUID_CLOCK CLOCK_TAI      (preferred, if available)
 *   PK_UUID_CLOCK CLOCK_REALTIME (fallback)
 *
 * The `PK_UUID_CLOCK` macro has minimal built-in fallback logic.
 * The uuidv7 specification states that the timestamp portion of the uuid must
 * be a unix epoch, leap seconds EXCLUDED. Only `CLOCK_TAI` meets this
 * requirement on Linux.
 *
 * Note that this currently calls `srand()` once at startup, and calls `rand()`
 * 2 times for each uuidv7 to fill 74 bits with random data (with an XOR for
 * the remaining 10 bits).
 *
 ********************************************************************************
 * pkbktarr.h: define PK_IMPL_BKTARR before including pk.h to enable ad-hoc.
 *
 * Provides a struct for bucketed data allocation.
 *
 * Maximum (default) bucket limits are as follows:
 *   buckets:      0xFFFFFF (16777215)
 *   items/bucket: 0x40 (64)
 *
 * Note that you may specify separate `pk_membucket`s for the struct's arrays
 * `bucketed_data` + `idx_unused`, and the actual bucketed array data found
 * within `bucketed_data`.
 * If the `pk_membucket` for "data" is exclusive to this struct, each bucket
 * (and by extension, the data) will be contiguous in memory.
 *
 * Examples:
 * ```c
 * struct pk_bkt_arr_handle custom_limits;
 * custom_limits.b = 8;
 * custom_limits.i = 8;
 * struct pk_bkt_arr arr;
 * pk_bkt_arr_init(
 *     &arr, sizeof(int), alignof(int), custom_limits, bkt_buckets, bkt_data);
 * struct pk_bkt_arr_handle h = pk_bkt_arr_new_handle(&arr);
 * int **int_ptrs = (int **)arr.bucketed_data;
 * int_ptrs[h.b][h.i] = 128;
 * pk_bkt_arr_free_handle(&arr, h);
 * pk_bkt_arr_teardown(&arr);
 * ```
 * ```c++
 * // default limits, no pk_membucket
 * struct pk_bkt_arr arr{};
 * struct pk_bkt_arr_handle h = pk_bkt_arr_new_handle(&arr);
 * arr[h] = 128;
 * pk_bkt_arr_free_handle(&arr, h);
 * pk_bkt_arr_teardown(&arr);
 * ```
 *
 ********************************************************************************
 * pkfuncinstr.h: define PK_IMPL_FUNCINSTR before including pk.h to enable
 * ad-hoc.
 *
 * Provides function instrumentation.
 *
 * Note: Currently only supports gcc/g++.
 * Note: Currently only prints results.
 *
 * Examples:
 * ```c
 * int main(void) {
 *   pk_funcinstr_init();
 *   ...
 *   pk_funcinstr_teardown();
 * }
 * ```
 *
 ********************************************************************************
 * pktst.h: define PK_IMPL_TST before including pk.h to enable ad-hoc.
 *
 * Provides a simple testing framework.
 *
 * Examples:
 * ```c
 * int main(void) {
 *   pk_test_run_test_groups(&my_get_test_group_func, 1);
 * }
 * ```
 *
 *******************************************************************************/

#define PK_VERSION "0.9.8"

#ifdef PK_IMPL_ALL
# ifndef PK_IMPL_MEM_TYPES
#  define PK_IMPL_MEM_TYPES
# endif
# ifndef PK_IMPL_MEM
#  define PK_IMPL_MEM
# endif
# ifndef PK_IMPL_STR
#  define PK_IMPL_STR
# endif
# ifndef PK_IMPL_EV
#  define PK_IMPL_EV
# endif
# ifndef PK_IMPL_ARR
#  define PK_IMPL_ARR
# endif
# ifndef PK_IMPL_STN
#  define PK_IMPL_STN
# endif
# ifndef PK_IMPL_UUID
#  define PK_IMPL_UUID
# endif
# ifndef PK_IMPL_BKTARR
#  define PK_IMPL_BKTARR
# endif
# ifndef PK_IMPL_FUNCINSTR
#  define PK_IMPL_FUNCINSTR
# endif
# ifndef PK_IMPL_TST
#  define PK_IMPL_TST
# endif
#endif

#ifndef PK_MACROS_H
#define PK_MACROS_H

#ifndef PK_LOG_OVERRIDE
# ifdef NDEBUG
#  define PK_LOG_ERR(str) (void)str
#  define PK_LOG_INF(str) (void)str
#  define PK_LOGV_ERR(str, ...) (void)str
#  define PK_LOGV_INF(str, ...) (void)str
# else
#  define PK_LOG_ERR(str) fprintf(stderr, str)
#  define PK_LOG_INF(str) fprintf(stdout, str)
#  define PK_LOGV_ERR(str, ...) fprintf(stderr, str, __VA_ARGS__)
#  define PK_LOGV_INF(str, ...) fprintf(stdout, str, __VA_ARGS__)
# endif
#endif

#define PK_CLR_RESET "\033[0m"
#define PK_CLR_FG_BLACK "\033[30m"
#define PK_CLR_FG_RED "\033[31m"
#define PK_CLR_FG_GREEN "\033[32m"
#define PK_CLR_FG_YELLOW "\033[33m"
#define PK_CLR_FG_BLUE "\033[34m"
#define PK_CLR_FG_MAGENTA "\033[35m"
#define PK_CLR_FG_CYAN "\033[36m"
#define PK_CLR_FG_WHITE "\033[37m"
#define PK_CLR_BG_BLACK "\033[40m"
#define PK_CLR_BG_RED "\033[41m"
#define PK_CLR_BG_GREEN "\033[42m"
#define PK_CLR_BG_YELLOW "\033[43m"
#define PK_CLR_BG_BLUE "\033[44m"
#define PK_CLR_BG_MAGENTA "\033[45m"
#define PK_CLR_BG_CYAN "\033[46m"
#define PK_CLR_BG_WHITE "\033[47m"
#define PK_CLR_FG_BRIGHT_BLACK "\033[90m"
#define PK_CLR_FG_BRIGHT_RED "\033[91m"
#define PK_CLR_FG_BRIGHT_GREEN "\033[92m"
#define PK_CLR_FG_BRIGHT_YELLOW "\033[93m"
#define PK_CLR_FG_BRIGHT_BLUE "\033[94m"
#define PK_CLR_FG_BRIGHT_MAGENTA "\033[95m"
#define PK_CLR_FG_BRIGHT_CYAN "\033[96m"
#define PK_CLR_FG_BRIGHT_WHITE "\033[97m"
#define PK_CLR_BG_BRIGHT_BLACK "\033[100m"
#define PK_CLR_BG_BRIGHT_RED "\033[101m"
#define PK_CLR_BG_BRIGHT_GREEN "\033[102m"
#define PK_CLR_BG_BRIGHT_YELLOW "\033[103m"
#define PK_CLR_BG_BRIGHT_BLUE "\033[104m"
#define PK_CLR_BG_BRIGHT_MAGENTA "\033[105m"
#define PK_CLR_BG_BRIGHT_CYAN "\033[106m"
#define PK_CLR_BG_BRIGHT_WHITE "\033[107m"

#define PK_Q(x) #x
#define PK_QUOTE(x) PK_Q(x)
#define PK_CONCAT2(x, y) x##y
#define PK_CONCAT(x, y) PK_CONCAT2(x, y)

#define PK_HAS_FLAG(val, flag) ((val & flag) == flag)
#define PK_CLAMP(val, min, max) (val < min ? min : val > max ? max : val)
#define PK_MIN(val, min) (val < min ? val : min)
#define PK_MAX(val, max) (val > max ? val : max)

#define PK_TO_BIN_PAT PK_Q(%c%c%c%c%c%c%c%c)
#define PK_TO_BIN_PAT_8 PK_TO_BIN_PAT
#define PK_TO_BIN_PAT_16 PK_TO_BIN_PAT PK_TO_BIN_PAT
#define PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_16 PK_TO_BIN_PAT_16
#define PK_TO_BIN_PAT_64 PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_32
#define PK_TO_BIN(byte) \
  ((byte) & 0x80 ? '1' : '0'), \
  ((byte) & 0x40 ? '1' : '0'), \
  ((byte) & 0x20 ? '1' : '0'), \
  ((byte) & 0x10 ? '1' : '0'), \
  ((byte) & 0x08 ? '1' : '0'), \
  ((byte) & 0x04 ? '1' : '0'), \
  ((byte) & 0x02 ? '1' : '0'), \
  ((byte) & 0x01 ? '1' : '0')
#define PK_TO_BIN_8(u8) PK_TO_BIN(u8)
#define PK_TO_BIN_16(u16) PK_TO_BIN((u16 >> 8)), PK_TO_BIN((u16 & 0x00FF))
#define PK_TO_BIN_32(u32) PK_TO_BIN_16((u32 >> 16)), PK_TO_BIN_16((u32 & 0x0000FFFF))
#define PK_TO_BIN_64(u64) PK_TO_BIN_32((u64 >> 32)), PK_TO_BIN_32((u64 & 0x00000000FFFFFFFF))
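/* Example: printing an integer's bits with the PK_TO_BIN helpers. A small
 * illustrative sketch (x is a local variable, not part of the library):
 *
 *   uint16_t x = 0xF00D;
 *   printf(PK_TO_BIN_PAT_16 "\n", PK_TO_BIN_16(x));
 */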
#if defined(__cplusplus)
# define CAFE_BABE(T) reinterpret_cast<T *>(0xCAFEBABE)
#else
# define CAFE_BABE(T) (T *)(0xCAFEBABE)
#endif

#define NULL_CHAR_ARR(v, len) char v[len]; v[0] = '\0'; v[len-1] = '\0';

#define IS_CONSTRUCTIBLE(T) constexpr(std::is_default_constructible<T>::value && !std::is_integral<T>::value && !std::is_floating_point<T>::value)
#define IS_DESTRUCTIBLE(T) constexpr(std::is_destructible<T>::value && !std::is_integral<T>::value && !std::is_floating_point<T>::value && !std::is_array<T>::value)

#define TypeSafeInt2_H(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \
  using TypeName_T = Type; \
  enum class TypeName : TypeName_T; \
  constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max}; \
  constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX}; \
  TypeName operator+(const TypeName& a, const TypeName& b); \
  TypeName operator-(const TypeName& a, const TypeName& b); \
  TypeName operator*(const TypeName& a, const TypeName& b); \
  TypeName operator/(const TypeName& a, const TypeName& b); \
  TypeName operator&(const TypeName& a, const TypeName& b); \
  TypeName operator|(const TypeName& a, const TypeName& b); \
  TypeName operator^(const TypeName& a, const TypeName& b); \
  TypeName& operator++(TypeName& a); \
  TypeName& operator--(TypeName& a); \
  TypeName operator++(TypeName& a, int); \
  TypeName operator--(TypeName& a, int); \
  TypeName operator<<(const TypeName& a, const TypeName& b); \
  TypeName operator>>(const TypeName& a, const TypeName& b); \
  TypeName operator+=(TypeName& a, const TypeName& b); \
  TypeName operator-=(TypeName& a, const TypeName& b); \
  TypeName operator*=(TypeName& a, const TypeName& b); \
  TypeName operator/=(TypeName& a, const TypeName& b); \
  TypeName operator&=(TypeName& a, const TypeName& b); \
  TypeName operator|=(TypeName& a, const TypeName& b); \
  TypeName operator^=(TypeName& a, const TypeName& b); \
  TypeName operator~(TypeName& a);

#define TypeSafeInt2_B(TypeName, TypeName_T) \
  TypeName operator+(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) + static_cast<TypeName_T>(b)); } \
  TypeName operator-(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) - static_cast<TypeName_T>(b)); } \
  TypeName operator*(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) * static_cast<TypeName_T>(b)); } \
  TypeName operator/(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) / static_cast<TypeName_T>(b)); } \
  TypeName operator&(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) & static_cast<TypeName_T>(b)); } \
  TypeName operator|(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) | static_cast<TypeName_T>(b)); } \
  TypeName operator^(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) ^ static_cast<TypeName_T>(b)); } \
  TypeName& operator++(TypeName& a) { a = a + TypeName{1}; return a; } \
  TypeName& operator--(TypeName& a) { a = a - TypeName{1}; return a; } \
  TypeName operator++(TypeName& a, int) { a = a + TypeName{1}; return a; } \
  TypeName operator--(TypeName& a, int) { a = a - TypeName{1}; return a; } \
  TypeName operator<<(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) << static_cast<TypeName_T>(b)); } \
  TypeName operator>>(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) >> static_cast<TypeName_T>(b)); } \
  TypeName operator+=(TypeName& a, const TypeName& b) { a = TypeName{a + b}; return a; } \
  TypeName operator-=(TypeName& a, const TypeName& b) { a = TypeName{a - b}; return a; } \
  TypeName operator*=(TypeName& a, const TypeName& b) { a = TypeName{a * b}; return a; } \
  TypeName operator/=(TypeName& a, const TypeName& b) { a = TypeName{a / b}; return a; } \
  TypeName operator&=(TypeName& a, const TypeName& b) { a = TypeName{a & b}; return a; } \
  TypeName operator|=(TypeName& a, const TypeName& b) { a = TypeName{a | b}; return a; } \
  TypeName operator^=(TypeName& a, const TypeName& b) { a = TypeName{a ^ b}; return a; } \
  TypeName operator~(TypeName& a) { TypeName_T b{static_cast<TypeName_T>(a)}; return TypeName{static_cast<TypeName_T>(~b)}; }

#define TypeSafeInt_H(TypeName, Type, Max) \
  TypeSafeInt2_H(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX))

#define TypeSafeInt_B(TypeName) \
  TypeSafeInt2_B(TypeName, PK_CONCAT(TypeName, _T))

#define TypeSafeInt2_H_constexpr(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \
  using TypeName_T = Type; \
  enum class TypeName : TypeName_T; \
  constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max}; \
  constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX}; \
  constexpr TypeName operator+(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) + static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator-(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) - static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator*(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) * static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator/(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) / static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator&(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) & static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator|(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) | static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator^(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) ^ static_cast<TypeName_T>(b)); } \
  constexpr TypeName& operator++(TypeName& a) { a = a + TypeName{1}; return a; } \
  constexpr TypeName& operator--(TypeName& a) { a = a - TypeName{1}; return a; } \
  constexpr TypeName operator++(TypeName& a, int) { a = a + TypeName{1}; return a; } \
  constexpr TypeName operator--(TypeName& a, int) { a = a - TypeName{1}; return a; } \
  constexpr TypeName operator<<(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) << static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator>>(const TypeName& a, const TypeName& b) { return TypeName(static_cast<TypeName_T>(a) >> static_cast<TypeName_T>(b)); } \
  constexpr TypeName operator+=(TypeName& a, const TypeName& b) { a = TypeName{a + b}; return a; } \
  constexpr TypeName operator-=(TypeName& a, const TypeName& b) { a = TypeName{a - b}; return a; } \
  constexpr TypeName operator*=(TypeName& a, const TypeName& b) { a = TypeName{a * b}; return a; } \
  constexpr TypeName operator/=(TypeName& a, const TypeName& b) { a = TypeName{a / b}; return a; } \
  constexpr TypeName operator&=(TypeName& a, const TypeName& b) { a = TypeName{a & b}; return a; } \
  constexpr TypeName operator|=(TypeName& a, const TypeName& b) { a = TypeName{a | b}; return a; } \
  constexpr TypeName operator^=(TypeName& a, const TypeName& b) { a = TypeName{a ^ b}; return a; } \
  constexpr TypeName operator~(const TypeName& a) { TypeName_T b{static_cast<TypeName_T>(a)}; return TypeName{static_cast<TypeName_T>(~b)}; }

#define TypeSafeInt_constexpr(TypeName, Type, Max) \
  TypeSafeInt2_H_constexpr(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX))

#endif /* PK_MACROS_H */

#ifndef PK_PKTMPLN_H
#define PK_PKTMPLN_H
#if defined (__cplusplus)
#include <functional>

template <typename Ret, typename A1, typename B1>
struct pk_tmpln_1 {
  using FuncType = std::function<Ret(A1)>;
  FuncType func;
  static Ret invoke(void *ptr, B1 b1)
  {
    auto *self = static_cast<pk_tmpln_1 *>(ptr);
    return self->func(reinterpret_cast<A1>(b1));
  }
};

template <typename Ret, typename A1, typename A2, typename B1, typename B2>
struct pk_tmpln_2 {
  using FuncType = std::function<Ret(A1, A2)>;
  FuncType func;
  static Ret invoke(void *ptr, B1 b1, B2 b2)
  {
    auto *self = static_cast<pk_tmpln_2 *>(ptr);
    return self->func(reinterpret_cast<A1>(b1), reinterpret_cast<A2>(b2));
  }
};

template <typename Ret, typename A1, typename A2, typename A3, typename B1, typename B2, typename B3>
struct pk_tmpln_3 {
  using FuncType = std::function<Ret(A1, A2, A3)>;
  FuncType func;
  static Ret invoke(void *ptr, B1 b1, B2 b2, B3 b3)
  {
    auto *self = static_cast<pk_tmpln_3 *>(ptr);
    return self->func(reinterpret_cast<A1>(b1), reinterpret_cast<A2>(b2), reinterpret_cast<A3>(b3));
  }
};
#endif
#endif /* PK_PKTMPLN_H */

#ifndef PK_MEM_TYPES_H
#define PK_MEM_TYPES_H
#include <stdint.h>

typedef uint32_t pk_handle_bucket_index_T;
typedef uint32_t pk_handle_item_index_T;

enum PK_HANDLE_VALIDATION : uint8_t {
  PK_HANDLE_VALIDATION_VALID = 0,
  PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH = 1,
  PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH = 2,
  PK_HANDLE_VALIDATION_VALUE_MAX = 3,
};

struct pk_handle {
  pk_handle_bucket_index_T bucketIndex;
  pk_handle_item_index_T itemIndex;
};

#if ! defined(__cplusplus)
#define PK_HANDLE_MAX ((struct pk_handle){ .bucketIndex = 0xFFFFFFFF, .itemIndex = 0xFFFFFFFF })
#else
#define PK_HANDLE_MAX (pk_handle{ 0xFFFFFFFF, 0xFFFFFFFF })
#endif

enum PK_HANDLE_VALIDATION pk_handle_validate(const struct pk_handle handle, const struct pk_handle bucketHandle, const uint64_t maxItems);

#if defined(__cplusplus)
constexpr struct pk_handle pk_handle_MAX_constexpr = { 0xFFFFFFFF, 0xFFFFFFFF };

inline constexpr bool operator==(const pk_handle& lhs, const pk_handle& rhs)
{
  return lhs.bucketIndex == rhs.bucketIndex && lhs.itemIndex == rhs.itemIndex;
}

template <pk_handle handle, pk_handle bucketHandle, uint64_t maxItems>
inline constexpr enum PK_HANDLE_VALIDATION pk_handle_validate_constexpr()
{
  if constexpr (handle == pk_handle_MAX_constexpr) return PK_HANDLE_VALIDATION_VALUE_MAX;
  if constexpr (handle.bucketIndex > bucketHandle.bucketIndex) return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH;
  if constexpr (handle.itemIndex > maxItems) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
  if constexpr (handle.bucketIndex == bucketHandle.bucketIndex && handle.itemIndex > bucketHandle.itemIndex) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
  return PK_HANDLE_VALIDATION_VALID;
}
#endif /* __cplusplus */

struct pk_membucket;

enum PK_MEMBUCKET_FLAGS : uint64_t {
  PK_MEMBUCKET_FLAG_NONE = (0),
  PK_MEMBUCKET_FLAG_TRANSIENT = (1 << 01l),
  PK_MEMBUCKET_FLAG_ALL = (0xFFFFFFFFFFFFFFFF),
};
#endif /* PK_MEM_TYPES_H */

#ifdef PK_IMPL_MEM_TYPES
enum PK_HANDLE_VALIDATION pk_handle_validate(const struct pk_handle handle, const struct pk_handle bucketHandle, const uint64_t maxItems)
{
  if (handle.bucketIndex == PK_HANDLE_MAX.bucketIndex && handle.itemIndex == PK_HANDLE_MAX.itemIndex) return PK_HANDLE_VALIDATION_VALUE_MAX;
  if (handle.bucketIndex > bucketHandle.bucketIndex) return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH;
  if (handle.itemIndex > maxItems) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
  if (handle.bucketIndex == bucketHandle.bucketIndex && handle.itemIndex >
bucketHandle.itemIndex) return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH; return PK_HANDLE_VALIDATION_VALID; } #endif /* PK_IMPL_MEM_TYPES */ #ifndef PK_MEM_H #define PK_MEM_H #include #include #ifndef PK_MEM_DEFAULT_BUCKET_SIZE # define PK_MEM_DEFAULT_BUCKET_SIZE (1ULL * 1024ULL * 1024ULL * 256ULL) #endif size_t pk_mem_bucket_calculate_size(size_t sz, size_t reserved_block_count); struct pk_membucket* pk_mem_bucket_create(const char* description, int64_t sz, enum PK_MEMBUCKET_FLAGS flags); void pk_mem_bucket_debug_print(struct pk_membucket *bkt); void pk_mem_bucket_destroy(struct pk_membucket* bkt); void pk_mem_bucket_reset(struct pk_membucket* bkt); void pk_mem_bucket_set_client_mem_bucket(struct pk_membucket *bkt); bool pk_mem_bucket_ptr_is_in_mem_bucket(const void* ptr, const struct pk_membucket* bkt); void* pk_new_base(size_t sz, size_t alignment); void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt); void* pk_new(size_t sz, size_t alignment, struct pk_membucket* bkt); void pk_delete_base(const void* ptr, size_t sz); void pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt); void pk_delete(const void* ptr, size_t sz, struct pk_membucket* bkt); #if defined(__cplusplus) #include #include static inline void stupid_header_warnings_cpp() { (void)std::is_const::value; } template inline T* pk_new(pk_membucket* bucket = NULL) { void* ptr = NULL; if (bucket) { ptr = pk_new_bkt(sizeof(T), alignof(T), bucket); } else { ptr = pk_new_base(sizeof(T), alignof(T)); } if IS_CONSTRUCTIBLE(T) { return new (ptr) T{}; } return reinterpret_cast(ptr); } template inline T* pk_new_arr(long count, pk_membucket* bucket = NULL) { char* ptr = NULL; if (bucket) { ptr = static_cast(pk_new_bkt(sizeof(T) * count, alignof(T), bucket)); } else { ptr = static_cast(pk_new_base(sizeof(T) * count, alignof(T))); } if (ptr == NULL) return NULL; if IS_CONSTRUCTIBLE(T) { for (long i = 0; i < count; ++i) { new (ptr + (i * sizeof(T))) T{}; } } return reinterpret_cast(ptr); } template inline void pk_delete(const T* ptr, pk_membucket* bucket = NULL) { if IS_DESTRUCTIBLE(T) { reinterpret_cast(ptr)->~T(); } if (bucket) { return pk_delete_bkt(static_cast(ptr), sizeof(T), bucket); } else { return pk_delete_base(static_cast(ptr), sizeof(T)); } } template inline void pk_delete_arr(const T* ptr, long count, pk_membucket* bucket = NULL) { if IS_DESTRUCTIBLE(T) { for (long i = 0; i < count; ++i) { reinterpret_cast(reinterpret_cast(ptr) + (i * sizeof(T)))->~T(); } } if (bucket) { return pk_delete_bkt(static_cast(ptr), sizeof(T) * count, bucket); } else { return pk_delete_base(static_cast(ptr), sizeof(T) * count); } } #endif /* __cplusplus */ #endif /* PK_MEM */ #ifdef PK_IMPL_MEM #include #include #include #include static inline void pkmem_stupid_header_warnings() { (void)stdout; } #if defined(PK_MEMORY_DEBUGGER) #endif #ifndef PK_MINIMUM_ALIGNMENT # define PK_MINIMUM_ALIGNMENT 1 #endif #ifndef PK_MEMORY_DEBUGGER_MAX_BUCKET_COUNT #define PK_MEMORY_DEBUGGER_MAX_BUCKET_COUNT 16 #endif #define EXPECTED_PK_MEMBLOCK_SIZE 128 #define pk_memblock_blocks_idx(bkt, idx) ((bkt->block_capacity-1)-(idx)) #define pk_bkt_data(bkt) ((char*)bkt + EXPECTED_PK_MEMBLOCK_SIZE) #define pk_bkt_head(bkt) ((&bkt->data[0]) + bkt->head) #define pk_bkt_data_sz(bkt) (size_t)((char*)&bkt->blocks[0] - &bkt->data[0]) struct pk_memblock { union { char* data; void* ptr; }; size_t size; }; struct pk_membucket { // 00 mtx_t mtx; // 40 // the total size of the bucket, struct+data size_t size; // 48 // the current head of the bucket: byte offset 
from `data`. // All currently alloc'd data is before this offset size_t head; // 56 uint32_t block_capacity; // 60 uint32_t block_head_l; // 64 // this should ALWAYS point to the last block containing unalloced space in bkt uint32_t block_head_r; // 68 // the number of active allocations from this bucket // -should correlate to blocks that have a sz > 0 uint32_t alloc_count; // 72 struct pk_memblock *blocks; // 80 enum PK_MEMBUCKET_FLAGS flags; // 88 const char *description; // 96 #ifdef PK_MEMORY_DEBUGGER struct pk_memblock *debug_blocks; // 104 uint32_t debug_head_l; // 108 uint32_t debug_head_r; // 112 uint32_t debug_block_capacity; // 116 char padding[(8*1)+4]; #else char padding[(8*4)]; #endif // 128 // starting point for alloc'd data // data[] is illegal in c++ (though it works in gcc/clang, but so does this) char data[1]; }; static struct pk_membucket *client_bucket = NULL; size_t pk_mem_bucket_calculate_size(size_t sz, size_t reserved_block_count) { size_t base_size = EXPECTED_PK_MEMBLOCK_SIZE + sz + (sizeof(struct pk_memblock) * reserved_block_count); // This trick ensures that our array of pk_memblocks at the end is mem-aligned. // We do, however, still have to do the math when setting the ptr. // Why? the user may have strict memory requirements and didn't call this function. return base_size + (64 - (base_size % 64)); } bool pk_mem_bucket_ptr_is_in_mem_bucket(const void* ptr, const struct pk_membucket* bkt) { return (ptr >= (void*)bkt && ptr < (void*)pk_bkt_head(bkt)); } void pk_mem_bucket_debug_print(struct pk_membucket *bkt) { PK_LOG_INF("pk_membucket details:\n"); PK_LOGV_INF("\tbkt: %p\n", (void *)bkt); PK_LOGV_INF("\tdescription: %s\n", bkt->description); PK_LOGV_INF("\tsize: %lu\n", bkt->size); PK_LOGV_INF("\thead: %lu\n", bkt->head); PK_LOGV_INF("\tallocs: %u\n", bkt->alloc_count); PK_LOGV_INF("\tblock head_l: %u\n", bkt->block_head_l); PK_LOGV_INF("\tblock head_r: %u\n", bkt->block_head_r); PK_LOGV_INF("\tflags: %lu\n", bkt->flags); #ifdef PK_MEMORY_DEBUGGER PK_LOGV_INF("\tdebug alloc head_l: %u\n", bkt->debug_head_l); PK_LOGV_INF("\tdebug alloc head_r: %u\n", bkt->debug_head_r); PK_LOGV_INF("\tdebug cappacity: %u\n", bkt->debug_block_capacity); #endif } struct pk_membucket* pk_mem_bucket_create(const char* description, int64_t sz, enum PK_MEMBUCKET_FLAGS flags) { // 512 example: // [000-127] pk_membucket // [128-191] 64 bytes of data LOL // [192-511] 20 pk_memblocks (20 is worst-case, start 16, 4 per 64 bytes) if ((sz % 64) > 0) { sz += 64 - (sz % 64); } assert(sz >= 512 && "[pkmem.h] bucket too small to track allocation data"); struct pk_membucket* bkt = (struct pk_membucket*)aligned_alloc(64, sz); if (bkt == NULL) return NULL; mtx_init(&bkt->mtx, mtx_plain); bkt->size = sz; bkt->head = 0; bkt->block_capacity = 16; bkt->block_head_l = 0; bkt->block_head_r = 0; bkt->alloc_count = 0; bkt->flags = flags; bkt->description = description; char* blocks_addr = (char*)bkt + sz - (sizeof(struct pk_memblock) * bkt->block_capacity); blocks_addr -= (size_t)blocks_addr % 64; bkt->blocks = (struct pk_memblock*)blocks_addr; bkt->block_capacity = (size_t)(((char*)bkt + sz) - blocks_addr) / sizeof(struct pk_memblock); bkt->blocks[pk_memblock_blocks_idx(bkt,0)].size = pk_bkt_data_sz(bkt); bkt->blocks[pk_memblock_blocks_idx(bkt,0)].ptr = pk_bkt_data(bkt); #ifdef PK_MEMORY_DEBUGGER bkt->debug_head_l = 0; bkt->debug_head_r = 0; bkt->debug_block_capacity = 128; bkt->debug_blocks = (struct pk_memblock*)aligned_alloc(alignof(struct pk_memblock), sizeof(struct pk_memblock) * 128); 
bkt->debug_blocks[0].ptr = NULL; bkt->debug_blocks[0].size = 0; #endif return bkt; } void pk_mem_bucket_destroy(struct pk_membucket* bkt) { assert(bkt != NULL); #ifdef PK_MEMORY_DEBUGGER if (bkt->debug_blocks != NULL) free(bkt->debug_blocks); #endif free(bkt); } void pk_mem_bucket_reset(struct pk_membucket* bkt) { if (PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT) == false) { PK_LOG_ERR("WARNING: pk_bucket_reset called on non-transient pk_membucket\n"); } bkt->head = 0; bkt->block_capacity = 16; char* blocks_addr = (char*)bkt + bkt->size - (sizeof(struct pk_memblock) * bkt->block_capacity); blocks_addr -= (size_t)blocks_addr % 64; bkt->blocks = (struct pk_memblock*)blocks_addr; bkt->block_capacity = (size_t)(((char*)bkt + bkt->size) - blocks_addr) / sizeof(struct pk_memblock); bkt->block_head_l = 0; bkt->block_head_r = 0; bkt->alloc_count = 0; bkt->blocks[pk_memblock_blocks_idx(bkt,0)].size = pk_bkt_data_sz(bkt); bkt->blocks[pk_memblock_blocks_idx(bkt,0)].ptr = pk_bkt_data(bkt); #ifdef PK_MEMORY_DEBUGGER bkt->debug_head_l = 0; bkt->debug_head_r = 0; bkt->debug_blocks[0].ptr = NULL; bkt->debug_blocks[0].size = 0; #endif } void pk_mem_bucket_set_client_mem_bucket(struct pk_membucket *bkt) { client_bucket = bkt; } void pk_bucket_insert_block(struct pk_membucket* bkt, const struct pk_memblock* block) { // 2025-06-03 JCB // Note that this function should only be called if we're INSERTING. // This means that the block will never go at the END of the list - that would be an append. // It can, however, be placed at the beginning, in which case the entire array shifts. struct pk_memblock* new_block = NULL; struct pk_memblock* old_block = NULL; size_t i, k; // 1. resize if needed if (bkt->block_head_r+1 == bkt->block_capacity) { if (bkt->blocks[pk_memblock_blocks_idx(bkt, bkt->block_head_r)].size < sizeof(struct pk_memblock)) { PK_LOG_ERR("[pkmem.h] bkt out of memory when expanding memory blocks."); exit(1); } // this is all that needs done, arr can just grow like this bkt->blocks[pk_memblock_blocks_idx(bkt, bkt->block_head_r)].size -= sizeof(struct pk_memblock); bkt->block_capacity += 1; bkt->blocks -= 1; } // 2. move all blocks forward until we pass the pointer // reminder that these blocks are in REVERSE order for (i = bkt->block_head_r+1; i > 0; --i) { k = pk_memblock_blocks_idx(bkt, i); new_block = &bkt->blocks[k]; old_block = new_block+1; *new_block = *old_block; if (old_block->data < block->data) { break; } } assert(old_block != NULL); if (i == 0 && old_block != NULL) { *old_block = *block; } else { *new_block = *block; } bkt->block_head_r += 1; } void pk_bucket_collapse_blocks(struct pk_membucket* bkt) { // 1. 
loop through from (rev_idx)0 to head_r, shifting any blocks that have size 0 struct pk_memblock* new_block; struct pk_memblock* old_block; size_t i, ii, bhr; // there's an off by one annoynce here // start with ii = 0 // if we start with ii = 1, we might subtract from block_head_r when nothing was shifted for (i = 0, ii = 0, bhr = bkt->block_head_r; (i + ii) <= bhr; ++i) { new_block = &bkt->blocks[pk_memblock_blocks_idx(bkt, i)]; if (new_block->size > 0) continue; do { old_block = new_block - ii; if (old_block->size == 0) { ii+=1; } else { break; } } while (i + ii <= bhr); *new_block = *old_block; old_block->size = 0; old_block->ptr = NULL; } bkt->block_head_r -= ii; } void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt) { #ifdef PK_MEMORY_FORCE_MALLOC return malloc(sz); #endif if (sz == 0) return NULL; if (bkt == NULL) return NULL; // TODO some type of error handling if ((bkt->size - bkt->head) < (sz + alignment - 1)) return NULL; size_t i, k; size_t calculatedAlignment = alignment < PK_MINIMUM_ALIGNMENT ? PK_MINIMUM_ALIGNMENT : alignment; size_t misalignment = 0; struct pk_memblock tmp_blk; struct pk_memblock* block = NULL; void* data = NULL; mtx_lock(&bkt->mtx); // find block for (i = 0; i <= bkt->block_head_r; ++i) { k = pk_memblock_blocks_idx(bkt, i); tmp_blk = bkt->blocks[k]; misalignment = (size_t)(tmp_blk.data) % calculatedAlignment; misalignment = (calculatedAlignment - misalignment) % calculatedAlignment; if (tmp_blk.size < sz + misalignment) { continue; } block = &bkt->blocks[k]; break; } if (block == NULL) { mtx_unlock(&bkt->mtx); assert(block != NULL && "memory corruption: not enough space in chosen bkt"); } data = block->data + misalignment; #ifdef PK_MEMORY_DEBUGGER size_t ii; if (PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT) == false) { for (i = 0; i < bkt->debug_head_r; ++i) { assert((bkt->debug_blocks[i].size == 0 || (void*)(bkt->debug_blocks[i].data) != data) && "mem address alloc'd twice!"); } i = bkt->debug_head_l; if (bkt->debug_head_l == bkt->debug_head_r) { bkt->debug_head_l++; bkt->debug_head_r++; if (bkt->debug_head_r == bkt->debug_block_capacity) { struct pk_memblock *debug_blocks; debug_blocks = (struct pk_memblock*)aligned_alloc(alignof(struct pk_memblock), sizeof(struct pk_memblock) * (bkt->debug_block_capacity + 128)); assert(debug_blocks != NULL); memcpy(debug_blocks, bkt->debug_blocks, sizeof(struct pk_memblock) * bkt->debug_block_capacity); free(bkt->debug_blocks); bkt->debug_blocks = debug_blocks; bkt->debug_block_capacity += 128; } bkt->debug_blocks[bkt->debug_head_r].ptr = NULL; bkt->debug_blocks[bkt->debug_head_r].size = 0; } else { // 2025-06-05 JCB // This intentionally looks at debug_head_r, which could potentially // be uninitialized. I added some logic elsewhere to ensure that // whenever debug_head_r is incremented, we set the related block // to NULL/0 so that this will catch size==0. // I was experiencing an issue where in testing it was initialized to // NULL/0, but then in a client application it was garbage data. 
for (ii = bkt->debug_head_l+1; ii <= bkt->debug_head_r; ++ii) { if (bkt->debug_blocks[ii].size == 0) { bkt->debug_head_l = ii; break; } } assert(ii != bkt->debug_head_r+1); } assert(bkt->debug_head_l <= bkt->debug_head_r); bkt->debug_blocks[i].data = (char*)data; bkt->debug_blocks[i].size = sz; } #endif if (block->data == pk_bkt_head(bkt)) { bkt->head += (sz + misalignment); } tmp_blk.data = block->data; tmp_blk.size = misalignment; block->data += misalignment + sz; block->size -= misalignment + sz; if (tmp_blk.size > 0) { pk_bucket_insert_block(bkt, &tmp_blk); } pk_bucket_collapse_blocks(bkt); bkt->alloc_count++; assert(data >= (void*)pk_bkt_data(bkt) && "allocated data is before bucket data"); assert((char*)data <= pk_bkt_data(bkt) + bkt->size && "allocated data is after bucket data"); #ifdef PK_MEMORY_DEBUGGER if (PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT) == false) { size_t k; int64_t debug_tracked_alloc_size = 0; int64_t debug_bucket_alloc_size = pk_bkt_data_sz(bkt); for (i = 0; i < bkt->debug_head_r; ++i) { debug_tracked_alloc_size += bkt->debug_blocks[i].size; } for (i = 0; i <= bkt->block_head_r; ++i) { k = pk_memblock_blocks_idx(bkt, i); debug_bucket_alloc_size -= bkt->blocks[k].size; } assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!"); } #endif mtx_unlock(&bkt->mtx); memset(data, 0, sz); return data; } void* pk_new_base(size_t sz, size_t alignment) { if (client_bucket == NULL) return NULL; return pk_new_bkt(sz, alignment, client_bucket); } void* pk_new(size_t sz, size_t alignment, struct pk_membucket* bkt) { if (bkt != NULL) return pk_new_bkt(sz, alignment, bkt); return pk_new_base(sz, alignment); } void pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt) { #ifdef PK_MEMORY_FORCE_MALLOC #if defined(__cplusplus) std::free(const_cast(ptr)); #else free((void*)ptr); #endif return; #endif size_t i, k; mtx_lock(&bkt->mtx); assert(bkt->alloc_count > 0); assert(pk_mem_bucket_ptr_is_in_mem_bucket(ptr, bkt) && "pointer not in memory bucket range"); assert(sz > 0 && "attempted to free pointer of size 0"); #ifdef PK_MEMORY_DEBUGGER bool found = PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT); struct pk_memblock *debug_memblocks = bkt->debug_blocks; struct pk_memblock *mb; if (found == false) { for (i = bkt->debug_head_r+1; i > 0; --i) { mb = &debug_memblocks[i-1]; if (mb->size == 0) continue; if ((void*)(mb->ptr) == ptr) { assert(mb->size == sz && "[pkmem.h] incorrect free size"); mb->ptr = NULL; mb->size = 0; found = true; if (i <= bkt->debug_head_l) { bkt->debug_head_l = i-1; } if (i == bkt->debug_head_r+1) { if (bkt->debug_head_l == bkt->debug_head_r) { bkt->debug_head_l--; } bkt->debug_head_r--; } assert(bkt->debug_head_l <= bkt->debug_head_r); break; } } } assert(found && "[pkmem.h] double free or invalid ptr"); #endif bkt->alloc_count--; if (bkt->alloc_count == 0) { bkt->head = 0; bkt->block_head_l = 0; bkt->block_head_r = 0; bkt->blocks[pk_memblock_blocks_idx(bkt, 0)].data = pk_bkt_data(bkt); bkt->blocks[pk_memblock_blocks_idx(bkt, 0)].size = pk_bkt_data_sz(bkt); #ifdef PK_MEMORY_DEBUGGER bkt->debug_head_l = 0; bkt->debug_head_r = 0; bkt->debug_blocks[0].data = NULL; bkt->debug_blocks[0].size = 0; #endif mtx_unlock(&bkt->mtx); return; } char* afterPtr = ((char*)(ptr))+sz; struct pk_memblock* tmp_blk = NULL; struct pk_memblock* beforeBlk = NULL; struct pk_memblock* afterBlk = NULL; for (i = bkt->block_head_r+1; i > 0 ; --i) { k = pk_memblock_blocks_idx(bkt, (i-1)); tmp_blk = &bkt->blocks[k]; if (tmp_blk->data + 
tmp_blk->size == ptr) { beforeBlk = tmp_blk; break; } if (i <= bkt->block_head_r+1 && tmp_blk->data == afterPtr) { afterBlk = tmp_blk; continue; } if (tmp_blk->data < (char*)ptr) { break; } } if (ptr == &bkt->data[0] && afterBlk == NULL && bkt->blocks[pk_memblock_blocks_idx(bkt, 0)].data == afterPtr) { afterBlk = &bkt->blocks[pk_memblock_blocks_idx(bkt, 0)]; } if (afterBlk != NULL && afterBlk->data == pk_bkt_head(bkt)) { bkt->head -= sz; if (beforeBlk != NULL) { bkt->head -= beforeBlk->size; } } if (beforeBlk == NULL && afterBlk == NULL) { struct pk_memblock newBlock; memset(&newBlock, 0, sizeof(struct pk_memblock)); newBlock.data = (char*)ptr; newBlock.size = sz; pk_bucket_insert_block(bkt, &newBlock); } else if (beforeBlk != NULL && afterBlk != NULL) { beforeBlk->size += sz + afterBlk->size; if (beforeBlk->data == pk_bkt_head(bkt)) { bkt->block_head_r--; } afterBlk->size = 0; afterBlk->data = NULL; } else if (beforeBlk != NULL) { beforeBlk->size += sz; } else if (afterBlk != NULL) { afterBlk->data -= sz; afterBlk->size += sz; } pk_bucket_collapse_blocks(bkt); #ifdef PK_MEMORY_DEBUGGER if (PK_HAS_FLAG(bkt->flags, PK_MEMBUCKET_FLAG_TRANSIENT) == false) { int64_t debug_tracked_alloc_size = 0; int64_t debug_bucket_alloc_size = pk_bkt_data_sz(bkt); for (i = 0; i < bkt->debug_head_r; ++i) { debug_tracked_alloc_size += bkt->debug_blocks[i].size; } for (i = 0; i <= bkt->block_head_r; ++i) { k = pk_memblock_blocks_idx(bkt, i); debug_bucket_alloc_size -= bkt->blocks[k].size; } assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!"); } #endif mtx_unlock(&bkt->mtx); } void pk_delete_base(const void* ptr, size_t sz) { pk_delete_bkt(ptr, sz, client_bucket); } void pk_delete(const void* ptr, size_t sz, struct pk_membucket* bkt) { if (bkt != NULL) { pk_delete_bkt(ptr, sz, bkt); return; } pk_delete_base(ptr, sz); return; } #endif /* PK_IMPL_MEM */ #ifndef PK_STR_H #define PK_STR_H #include struct pk_str { char *val; uint32_t length; uint32_t reserved; }; struct pk_cstr { const char *val; uint32_t length; uint32_t reserved; }; struct pk_str cstring_to_pk_str(char *s); struct pk_cstr cstring_to_pk_cstr(const char *s); struct pk_str pk_cstr_to_pk_str(const struct pk_cstr *s); struct pk_cstr pk_str_to_pk_cstr(const struct pk_str *s); struct pk_str pk_str_clone(const struct pk_str *s, struct pk_membucket *bkt); struct pk_cstr pk_cstr_clone(const struct pk_cstr *s, struct pk_membucket *bkt); int pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs); int pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs); #endif /* PK_STR_H */ #ifdef PK_IMPL_STR #include struct pk_str cstring_to_pk_str(char *s) { struct pk_str ret; ret.val = s; ret.length = (uint32_t)(strlen(s)); ret.reserved = 0; return ret; } struct pk_cstr cstring_to_pk_cstr(const char *s) { struct pk_cstr ret; ret.val = s; ret.length = (uint32_t)(strlen(s)); ret.reserved = 0; return ret; } struct pk_str pk_cstr_to_pk_str(const struct pk_cstr *s) { struct pk_str ret; ret.val = (char *)s->val; ret.length = s->length; ret.reserved = s->reserved; return ret; } struct pk_cstr pk_str_to_pk_cstr(const struct pk_str *s) { struct pk_cstr ret; ret.val = (char *)s->val; ret.length = s->length; ret.reserved = s->reserved; return ret; } struct pk_str pk_str_clone(const struct pk_str *s, struct pk_membucket *bkt) { struct pk_str str; str.length = s->length == 0 ? 
strlen(s->val) : s->length; str.reserved = s->length + 1; char *ss = (char*)pk_new(str.reserved * sizeof(char), alignof(char), bkt); strncpy(ss, s->val, str.reserved); str.val = ss; return str; } struct pk_cstr pk_cstr_clone(const struct pk_cstr *s, struct pk_membucket *bkt) { struct pk_cstr str; str.length = s->length == 0 ? strlen(s->val) : s->length; str.reserved = s->length + 1; char *ss = (char*)pk_new(str.reserved * sizeof(char), alignof(char), bkt); strncpy(ss, s->val, str.reserved); str.val = ss; return str; } int pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs) { return strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length)); } int pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs) { return strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length)); } #endif /* PK_IMPL_STR */ #ifndef PK_EV_H #define PK_EV_H #include typedef uint64_t pk_ev_mgr_id_T; typedef uint64_t pk_ev_id_T; typedef uint64_t pk_ev_cb_id_T; const pk_ev_mgr_id_T pk_ev_mgr_id_T_MAX = 0xFFFFFFFFFFFFFFFF; const pk_ev_id_T pk_ev_id_T_MAX = 0xFFFFFFFFFFFFFFFF; const pk_ev_cb_id_T pk_ev_cb_id_T_MAX = 0xFFFFFFFFFFFFFFFF; // TODO re-think threading // note: pk_ev_init() is NOT thread-safe void pk_ev_init(struct pk_membucket *bkt); // note: pk_ev_teardown() is NOT thread-safe void pk_ev_teardown(); pk_ev_mgr_id_T pk_ev_create_mgr(); void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr); typedef void (pk_ev_cb_fn)(void *user_event_data, void *user_cb_data, void *user_ev_data); pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data); pk_ev_cb_id_T pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data); void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data); void pk_ev_unregister_ev(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid); void pk_ev_unregister_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_id_T cbid); #endif /* PK_EV_H */ #ifdef PK_IMPL_EV #include #include #include #include #include #ifndef PK_EV_INIT_MGR_COUNT # define PK_EV_INIT_MGR_COUNT 1 #endif #ifndef PK_EV_INIT_EV_COUNT # define PK_EV_INIT_EV_COUNT 16 #endif #ifndef PK_EV_INIT_CB_COUNT # define PK_EV_INIT_CB_COUNT 8 #endif #ifndef PK_EV_GROW_RATIO # define PK_EV_GROW_RATIO 1.5 #endif // hard limits // PK_EV_MAX_EV_COUNT would require a refactor for keeping track of used slots #define PK_EV_MAX_EV_COUNT 64 // PK_EV_MAX_CB_COUNT could be increased as desired #define PK_EV_MAX_CB_COUNT 255 #ifndef PK_EV_MEM_ALLOC # define PK_EV_MEM_ALLOC(sz, alignment, bkt) pk_new(sz, alignment, bkt) #endif #ifndef PK_EV_MEM_FREE # define PK_EV_MEM_FREE(ptr, sz, bkt) pk_delete(ptr, sz, bkt) #endif struct pk_ev_cb { pk_ev_cb_fn *cb; void *user_cb_data; }; struct pk_ev { struct pk_ev_cb *ev_cbs; void *user_ev_data; atomic_uint_fast64_t left_ev_cbs; atomic_uint_fast64_t right_ev_cbs; }; struct pk_ev_mgr { struct pk_ev *ev; atomic_uint_fast64_t left_evs; atomic_uint_fast64_t right_evs; atomic_uint_fast64_t unused_evs; // reserved length of `pk_ev`s on this struct atomic_uint_fast64_t rn_ev; // on any given `pk_ev`, the number of callbacks reserved atomic_uint_fast64_t rn_cb; }; struct pk_ev_mstr { atomic_uint_fast64_t flg_mgrs; atomic_uint_fast64_t rn_mgrs; struct pk_ev_mgr **mgrs; mtx_t *mtxs; struct pk_membucket *bkt; }; struct pk_ev_mstr pk_ev_mstr; void pk_ev_init(struct pk_membucket* bkt) { int i; pk_ev_mstr.bkt = bkt; pk_ev_mstr.mgrs = (struct pk_ev_mgr **)PK_EV_MEM_ALLOC(sizeof(void *) * PK_EV_INIT_MGR_COUNT, alignof(void *), bkt); pk_ev_mstr.mtxs = 
(mtx_t*)PK_EV_MEM_ALLOC(sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT, alignof(mtx_t), bkt); memset(pk_ev_mstr.mgrs, 0, sizeof(void *) * PK_EV_INIT_MGR_COUNT); memset(pk_ev_mstr.mtxs, 0, sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT); for (i = 0; i < PK_EV_INIT_MGR_COUNT; ++i) { mtx_init(&pk_ev_mstr.mtxs[i], mtx_plain); } atomic_store(&pk_ev_mstr.flg_mgrs, 0lu); atomic_store(&pk_ev_mstr.rn_mgrs, PK_EV_INIT_MGR_COUNT); } size_t pk_ev_inner_calc_sz(uint64_t ev_count, uint64_t cb_count, size_t *sz_ev_list, size_t *sz_ev_cb_list) { // base sizes size_t l_sz_ev_list = sizeof(struct pk_ev) * ev_count; size_t l_sz_ev_cb_list = sizeof(struct pk_ev_cb) * cb_count; l_sz_ev_list += ((size_t)64 - alignof(struct pk_ev)) % (size_t)64; l_sz_ev_cb_list += ((size_t)64 - alignof(struct pk_ev_cb)) % (size_t)64; if (sz_ev_list != nullptr) *sz_ev_list = l_sz_ev_list; if (sz_ev_cb_list != nullptr) *sz_ev_cb_list = l_sz_ev_cb_list; size_t ret = sizeof(struct pk_ev_mgr); ret += l_sz_ev_list; ret += l_sz_ev_cb_list * ev_count; return ret; } void pk_ev_teardown() { long unsigned int i; for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) { if ((atomic_load(&pk_ev_mstr.flg_mgrs) & (1lu << i)) == 0lu) continue; mtx_lock(&pk_ev_mstr.mtxs[i]); size_t sz = pk_ev_inner_calc_sz( atomic_load(&pk_ev_mstr.mgrs[i]->rn_ev), atomic_load(&pk_ev_mstr.mgrs[i]->rn_cb), NULL, NULL ); PK_EV_MEM_FREE(pk_ev_mstr.mgrs[i], sz, pk_ev_mstr.bkt); pk_ev_mstr.mgrs[i] = NULL; mtx_unlock(&pk_ev_mstr.mtxs[i]); mtx_destroy(&pk_ev_mstr.mtxs[i]); } PK_EV_MEM_FREE(pk_ev_mstr.mgrs, sizeof(void *) * atomic_load(&pk_ev_mstr.rn_mgrs), pk_ev_mstr.bkt); PK_EV_MEM_FREE(pk_ev_mstr.mtxs, sizeof(mtx_t) * atomic_load(&pk_ev_mstr.rn_mgrs), pk_ev_mstr.bkt); pk_ev_mstr.mgrs = NULL; pk_ev_mstr.mtxs = NULL; } static struct pk_ev_mgr* pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count) { assert(ev_count < 0x100); assert(cb_count < 0x100); uint64_t i; char *ptr; struct pk_ev *ev; size_t sz_ev_list; size_t sz_ev_cb_list; size_t sz_offset; size_t sz = pk_ev_inner_calc_sz(ev_count, cb_count, &sz_ev_list, &sz_ev_cb_list); struct pk_ev_mgr *mgr = (struct pk_ev_mgr*)PK_EV_MEM_ALLOC(sz, alignof(struct pk_ev_mgr), pk_ev_mstr.bkt); if (mgr == NULL) goto early_exit; ptr = ((char *)mgr) + sizeof(struct pk_ev_mgr); sz_offset = (size_t)ptr % alignof(struct pk_ev); ptr += ((size_t)64 - sz_offset) % (size_t)64; mgr->ev = (struct pk_ev*)ptr; atomic_init(&mgr->rn_ev, ev_count); atomic_init(&mgr->rn_cb, cb_count); atomic_init(&mgr->left_evs, 0); atomic_init(&mgr->right_evs, 0); atomic_init(&mgr->unused_evs, 0xFFFFFFFFFFFFFFFF); // find mem-aligned beginning of cb array ptr += sz_ev_list; sz_offset = (size_t)ptr % alignof(struct pk_ev_cb); ptr += ((size_t)64 - sz_offset) % (size_t)64; for (i = 0; i < ev_count; ++i) { ev = &mgr->ev[i]; atomic_init(&ev->left_ev_cbs, 0); atomic_init(&ev->right_ev_cbs, 0); sz_offset = sz_ev_cb_list * i; ev->ev_cbs = (struct pk_ev_cb*)(ptr + sz_offset); } /* debug fprintf(stdout, "[%s] mgr: sz: %lu, ev_count: %lu, cb_count: %lu \n", __FILE__, sz, ev_count, cb_count); fprintf(stdout, "\t%p - ptr\n", (void*)mgr); fprintf(stdout, "\t%p - evs (+%lu)\n", (void*)mgr->ev, (char*)mgr->ev - (char*)mgr); fprintf(stdout, "\t%p - cbs (+%lu)\n", (void*)mgr->ev[0].ev_cbs, (char*)mgr->ev[0].ev_cbs - (char*)mgr); */ early_exit: return mgr; } static void pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr) { uint64_t i, ii; uint64_t u, uu; struct pk_ev *ev_old; struct pk_ev *ev; ii = atomic_load(&old->right_evs); atomic_store(&mgr->left_evs, 
atomic_load(&old->left_evs)); atomic_store(&mgr->right_evs, ii); atomic_store(&mgr->unused_evs, atomic_load(&old->unused_evs)); for (i = 0; i < ii; ++i) { ev_old = &old->ev[i]; ev = &mgr->ev[i]; ev->user_ev_data = ev_old->user_ev_data; uu = atomic_load(&ev_old->right_ev_cbs); for (u = 0; u <= uu; ++u) { ev->ev_cbs[u].cb = ev_old->ev_cbs[u].cb; ev->ev_cbs[u].user_cb_data = ev_old->ev_cbs[u].user_cb_data; } atomic_store(&ev->left_ev_cbs, atomic_load(&ev_old->left_ev_cbs)); atomic_store(&ev->right_ev_cbs, atomic_load(&ev_old->right_ev_cbs)); } } pk_ev_mgr_id_T pk_ev_create_mgr() { uint64_t i; pk_ev_mgr_id_T flg; pk_ev_mgr_id_T flg_new; pk_ev_mgr_id_T id; struct pk_ev_mgr *mgr = pk_ev_inner_ev_mgr_create(PK_EV_INIT_EV_COUNT, PK_EV_INIT_CB_COUNT); if (mgr == NULL) return -1; start: flg = atomic_load(&pk_ev_mstr.flg_mgrs); while (1) { flg_new = flg; for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) { if ((flg & (1lu << i)) == 0) break; } if (i == atomic_load(&pk_ev_mstr.rn_mgrs)) { goto recreate; } id = i; flg_new |= (1lu << i); if (atomic_compare_exchange_weak(&pk_ev_mstr.flg_mgrs, &flg, flg_new)) break; thrd_yield(); } pk_ev_mstr.mgrs[id]= mgr; return id; recreate: // TODO recreate mgr, out of space assert(1 == 0 && "[pkev.h] Out of mgr space."); exit(1); goto start; } void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr) { pk_ev_mgr_id_T flg; pk_ev_mgr_id_T flg_new; assert(evmgr < pk_ev_mstr.rn_mgrs); mtx_lock(&pk_ev_mstr.mtxs[evmgr]); size_t old_sz = pk_ev_inner_calc_sz(pk_ev_mstr.mgrs[evmgr]->rn_ev, pk_ev_mstr.mgrs[evmgr]->rn_cb, NULL, NULL); PK_EV_MEM_FREE(pk_ev_mstr.mgrs[evmgr], old_sz, pk_ev_mstr.bkt); pk_ev_mstr.mgrs[evmgr] = NULL; flg = atomic_load(&pk_ev_mstr.flg_mgrs); while (1) { flg_new = flg; flg_new &= ~(1lu << evmgr); if (atomic_compare_exchange_weak(&pk_ev_mstr.flg_mgrs, &flg, flg_new)) break; thrd_yield(); } mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); } pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data) { assert(evmgr < 64); uint64_t new_size; uint64_t i, ii, flg; pk_ev_id_T id; struct pk_ev_mgr *mgr = nullptr; mtx_lock(&pk_ev_mstr.mtxs[evmgr]); mgr = pk_ev_mstr.mgrs[evmgr]; if (mgr->left_evs == mgr->right_evs && mgr->right_evs == mgr->rn_ev) { new_size = PK_MAX(2, PK_MIN(PK_EV_MAX_EV_COUNT, mgr->rn_ev * PK_EV_GROW_RATIO)); if (new_size == mgr->rn_ev) { PK_LOG_ERR("[pkev.h] need more room, but failed to grow ev count.\n"); mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); exit(1); } mgr = pk_ev_inner_ev_mgr_create(new_size, pk_ev_mstr.mgrs[evmgr]->rn_cb); pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr); size_t old_sz = pk_ev_inner_calc_sz(pk_ev_mstr.mgrs[evmgr]->rn_ev, pk_ev_mstr.mgrs[evmgr]->rn_cb, NULL, NULL); PK_EV_MEM_FREE(pk_ev_mstr.mgrs[evmgr], old_sz, pk_ev_mstr.bkt); pk_ev_mstr.mgrs[evmgr] = mgr; } id = atomic_load(&mgr->left_evs); flg = atomic_load(&mgr->unused_evs); if (mgr->left_evs != mgr->right_evs) { i = atomic_load(&mgr->left_evs) + 1; ii = atomic_load(&mgr->rn_ev); for (; i <= ii; ++i) { if (flg & (1lu << i)) { break; } } atomic_store(&mgr->left_evs, i); } else { atomic_store(&mgr->left_evs, atomic_load(&mgr->left_evs) + 1); atomic_store(&mgr->right_evs, atomic_load(&mgr->right_evs) + 1); } atomic_store(&mgr->unused_evs, flg & ~(1lu << id)); mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); mgr->ev[id].user_ev_data = user_ev_data; return id; } pk_ev_cb_id_T pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data) { assert(evmgr < PK_EV_INIT_MGR_COUNT); bool found = false; uint64_t new_size, i; struct pk_ev_mgr *mgr = nullptr; 
pk_ev_cb_id_T cb_index; if (pk_ev_mstr.mgrs[evmgr] == nullptr) { PK_LOGV_ERR("[pkev.h] unknown manager: '%lu'.\n", evmgr); exit(1); } for (i = pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) { if (found == false && pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb != nullptr) { found = true; cb_index = i; continue; } if (found == false) continue; if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) { pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs = i; break; } } if (found == false) { mtx_lock(&pk_ev_mstr.mtxs[evmgr]); if (pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) { size_t old_sz = pk_ev_inner_calc_sz(pk_ev_mstr.mgrs[evmgr]->rn_ev, pk_ev_mstr.mgrs[evmgr]->rn_cb, NULL, NULL); new_size = PK_MAX(2, PK_MIN(PK_EV_MAX_CB_COUNT, pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO)); if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_cb) { PK_LOG_ERR("[pkev.h] need more room, but failed to grow cb count.\n"); mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); exit(1); } mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev, new_size); pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr); PK_EV_MEM_FREE(pk_ev_mstr.mgrs[evmgr], old_sz, pk_ev_mstr.bkt); pk_ev_mstr.mgrs[evmgr] = mgr; mgr = nullptr; } cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs++; mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); if (cb_index == pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs) { pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs++; } } pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb; pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data; return cb_index; } void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data) { assert(evmgr < PK_EV_INIT_MGR_COUNT); uint8_t i; for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) { if (pk_ev_mstr.mgrs[evmgr] == nullptr) continue; if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) continue; (*pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb)( pk_ev_mstr.mgrs[evmgr]->ev[evid].user_ev_data, pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].user_cb_data, user_emit_data); } } void pk_ev_unregister_ev(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid) { assert(evmgr <= pk_ev_mstr.rn_mgrs); struct pk_ev_mgr *mgr = pk_ev_mstr.mgrs[evmgr]; assert(evid <= mgr->right_evs); if (mgr == nullptr) return; mgr->ev[evid].user_ev_data = NULL; atomic_store(&mgr->ev[evid].left_ev_cbs, 0); atomic_store(&mgr->ev[evid].right_ev_cbs, 0); for (uint64_t u = 0; u < mgr->rn_cb; ++u) { mgr->ev[evid].ev_cbs[u].cb = NULL; mgr->ev[evid].ev_cbs[u].user_cb_data = NULL; } atomic_store(&mgr->unused_evs, atomic_load(&mgr->unused_evs) | (1lu << evid)); if (evid < atomic_load(&mgr->left_evs)) { atomic_store(&mgr->left_evs, evid); } } void pk_ev_unregister_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_id_T cbid) { struct pk_ev_mgr *mgr = pk_ev_mstr.mgrs[evmgr]; if (mgr == nullptr) return; if (mgr->ev[evid].left_ev_cbs > cbid) { mgr->ev[evid].left_ev_cbs = cbid; } mgr->ev[evid].ev_cbs[cbid].cb = nullptr; mgr->ev[evid].ev_cbs[cbid].user_cb_data = nullptr; } #endif /* PK_IMPL_EV */ #ifndef PK_PKITER_H #define PK_PKITER_H union pk_iter_id { struct pk_iter_bkt_handle { unsigned int b : 24; unsigned int i : 8; } bkt; struct pk_iter_arr_idx { unsigned int i : 32; } arr; }; struct pk_iter { void *data; union pk_iter_id id; }; #if defined (__cplusplus) template struct pk_iter_t : public pk_iter { operator T*() { return reinterpret_cast(this->data); } T* operator->() { return reinterpret_cast(this->data); 
#ifndef PK_PKITER_H
#define PK_PKITER_H
union pk_iter_id
{
	struct pk_iter_bkt_handle
	{
		unsigned int b : 24;
		unsigned int i : 8;
	} bkt;
	struct pk_iter_arr_idx
	{
		unsigned int i : 32;
	} arr;
};
struct pk_iter
{
	void *data;
	union pk_iter_id id;
};
#if defined (__cplusplus)
template <typename T>
struct pk_iter_t : public pk_iter
{
	operator T*() { return reinterpret_cast<T*>(this->data); }
	T* operator->() { return reinterpret_cast<T*>(this->data); }
};
#endif
#endif /* PK_PKITER_H */

#ifndef PK_PKARR_H
#define PK_PKARR_H
#include <stdint.h>
#include <stdbool.h>
struct pk_arr
{
	uint32_t next;
	uint32_t reserved;
	uint32_t stride;
	uint32_t alignment;
	struct pk_membucket *bkt;
	void *data;
};
typedef bool(pk_arr_item_compare)(void *user_data, void *item);
void pk_arr_clear(struct pk_arr *arr);
void pk_arr_reset(struct pk_arr *arr);
void pk_arr_reserve(struct pk_arr *arr, uint32_t count);
void pk_arr_resize(struct pk_arr *arr, uint32_t count);
void pk_arr_move_to_back(struct pk_arr *arr, uint32_t index);
void pk_arr_append(struct pk_arr *arr, void *data);
void pk_arr_remove_at(struct pk_arr *arr, uint32_t index);
void pk_arr_clone(struct pk_arr *lhs, struct pk_arr *rhs);
void pk_arr_swap(struct pk_arr *lhs, struct pk_arr *rhs);
uint32_t pk_arr_find_first_index(struct pk_arr *arr, void *user_data, pk_arr_item_compare *fn);
bool pk_arr_iter_begin(struct pk_arr *arr, struct pk_iter *it);
bool pk_arr_iter_end(struct pk_arr *arr, struct pk_iter *it);
bool pk_arr_iter_increment(struct pk_arr *arr, struct pk_iter *it);
bool pk_arr_iter_decrement(struct pk_arr *arr, struct pk_iter *it);
#if defined(__cplusplus)
template <typename T>
struct pk_arr_t : public pk_arr
{
	pk_arr_t();
	pk_arr_t(struct pk_membucket *bkt);
	pk_arr_t(const pk_arr_t &other);
	pk_arr_t(pk_arr_t &&other);
	pk_arr_t &operator=(const pk_arr_t &other);
	pk_arr_t &operator=(pk_arr_t &&other);
	T &operator[](size_t index);
};
template <typename T>
pk_arr_t<T>::pk_arr_t()
{
	this->next = 0;
	this->reserved = 0;
	this->stride = sizeof(T);
	this->alignment = alignof(T);
	this->bkt = NULL;
	this->data = NULL;
}
template <typename T>
pk_arr_t<T>::pk_arr_t(struct pk_membucket *bkt) : pk_arr_t()
{
	this->bkt = bkt;
}
template <typename T>
pk_arr_t<T>::pk_arr_t(const pk_arr_t &other)
{
	// copy ctor
	pk_arr_clone(static_cast<struct pk_arr *>(&const_cast<pk_arr_t<T> &>(other)), this);
}
template <typename T>
pk_arr_t<T>::pk_arr_t(pk_arr_t &&other) : pk_arr_t()
{
	// move ctor; delegate first so the swap hands `other` a zeroed state
	pk_arr_swap(this, &other);
	other.data = NULL;
}
template <typename T>
pk_arr_t<T> &
pk_arr_t<T>::operator=(const pk_arr_t &other)
{
	// copy assignment
	if (this->data != NULL)
	{
		pk_arr_reset(this);
	}
	pk_arr_clone(static_cast<struct pk_arr *>(&const_cast<pk_arr_t<T> &>(other)), this);
	return *this;
}
template <typename T>
pk_arr_t<T> &
pk_arr_t<T>::operator=(pk_arr_t &&other)
{
	// move assignment
	if (this->data != NULL)
	{
		pk_arr_reset(this);
	}
	pk_arr_swap(this, &other);
	other.data = NULL;
	return *this;
}
template <typename T>
T &pk_arr_t<T>::operator[](size_t index)
{
	if (index >= this->next)
		throw "pk_arr_t::operator[] out of range";
	return reinterpret_cast<T *>(this->data)[index];
}
template <typename T>
void pk_arr_append_t(pk_arr_t<T> *arr, const T &item)
{
	pk_arr_append(arr, &const_cast<T &>(item));
}
#endif
#endif /* PK_PKARR_H */
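/*******************************************************************************
 * pkarr.h usage sketch in C. Per the notes above, `stride`, `alignment`, and
 * optionally `bkt` must be set before any pk_arr_* call; `bkt` here is assumed
 * to be a pk_membucket created via pkmem elsewhere.
 *
 * pk.h.arr.example.c
 * ``` c
 * struct pk_arr arr = {0};
 * arr.stride = sizeof(int);
 * arr.alignment = alignof(int); // <stdalign.h> in C11
 * arr.bkt = bkt;
 * int v = 42;
 * pk_arr_append(&arr, &v);
 * int *first = (int *)arr.data; // contiguous storage, arr.next == 1
 * pk_arr_reset(&arr);           // returns arr.data to the bucket
 * ```
 ******************************************************************************/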
#ifdef PK_IMPL_ARR
#include <string.h>
#ifndef PK_ARR_GROW_RATIO
#define PK_ARR_GROW_RATIO 1.5
#endif
#ifndef PK_ARR_INITIAL_COUNT
#define PK_ARR_INITIAL_COUNT 16
#endif

void
pk_arr_clear(struct pk_arr *arr)
{
	arr->next = 0;
}

void
pk_arr_reset(struct pk_arr *arr)
{
	if (arr->data != NULL)
		pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
	arr->data = NULL;
	arr->next = 0;
	arr->reserved = 0;
}

void
pk_arr_reserve(struct pk_arr *arr, uint32_t count)
{
	if (arr->reserved >= count)
		return;
	void *new_data = pk_new(arr->stride * count, arr->alignment, arr->bkt);
	if (arr->data != NULL)
	{
		if (arr->next != 0)
		{
			memcpy(new_data, arr->data, arr->stride * arr->reserved);
		}
		pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
	}
	arr->reserved = count;
	arr->data = new_data;
}

void
pk_arr_resize(struct pk_arr *arr, uint32_t count)
{
	pk_arr_reserve(arr, count);
	arr->next = count;
}

void
pk_arr_move_to_back(struct pk_arr *arr, uint32_t index)
{
	if (arr->reserved == 0)
		return;
	if (arr->next <= 1)
		return;
#ifdef PK_ARR_MOVE_IN_PLACE
	uint32_t i, ii;
	char *target = (char *)pk_new(arr->stride, arr->alignment, arr->bkt);
	char *buffer = (char *)arr->data;
	// copy bytes to temp buffer
	for (ii = 0, i = arr->stride * index; ii < arr->stride; ++ii, ++i)
	{
		target[ii] = buffer[i];
	}
	// shift everything forward
	// arr->stride = 8
	// arr->next = 2
	// index = 0
	//
	// for (i = 0; i < 8; ++i) {
	//     b[i] = b[i + 8]
	// }
	// b[00] = b[08]
	// b[01] = b[09]
	// ...
	// b[07] = b[15]
	for (i = arr->stride * index; i < (arr->stride * (arr->next - 1)); ++i)
	{
		buffer[i] = buffer[i + arr->stride];
	}
	// copy temp buffer back into arr
	// arr->stride = 8
	// arr->next = 2
	// index = 0
	//
	// for (ii = 0, i = 8; ii < 8; ++ii, ++i) {
	//     b[i] = t[ii]
	// }
	// b[08] = t[00]
	// b[09] = t[01]
	// ...
	// b[15] = t[07]
	for (ii = 0, i = arr->stride * (arr->next - 1); ii < arr->stride; ++ii, ++i)
	{
		buffer[i] = target[ii];
	}
	pk_delete(target, arr->stride, arr->bkt);
#else
	char *new_data = (char *)pk_new(arr->stride * arr->reserved, arr->alignment, arr->bkt);
	if (index > 0)
	{
		memcpy(new_data, arr->data, arr->stride * index);
	}
	memcpy(
		new_data + (arr->stride * (arr->next - 1)),
		((char *)arr->data) + (arr->stride * index),
		arr->stride);
	memcpy(
		new_data + (arr->stride * index),
		((char *)arr->data) + (arr->stride * (index + 1)),
		arr->stride * (arr->next - index - 1));
	pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
	arr->data = (void *)new_data;
#endif
}

void
pk_arr_append(struct pk_arr *arr, void *data)
{
	if (arr->reserved == arr->next)
	{
		uint32_t new_count = PK_MAX(arr->reserved == 0 ? PK_ARR_INITIAL_COUNT : arr->reserved * PK_ARR_GROW_RATIO, arr->reserved + 1);
		void *new_data = pk_new(arr->stride * new_count, arr->alignment, arr->bkt);
		if (arr->data != NULL)
		{
			memcpy(new_data, arr->data, arr->stride * arr->reserved);
			pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
		}
		arr->data = new_data;
		arr->reserved = new_count;
	}
	memcpy(((char *)arr->data) + (arr->stride * arr->next), data, arr->stride);
	arr->next += 1;
	return;
}
void
pk_arr_remove_at(struct pk_arr *arr, uint32_t index)
{
	if (arr->reserved == 0)
		return;
	if (index == arr->next - 1)
	{
		arr->next -= 1;
		return;
	}
#ifdef PK_ARR_MOVE_IN_PLACE
	uint32_t i;
	char *buffer = (char *)arr->data;
	// shift everything forward
	// arr->stride = 8
	// arr->next = 3
	// index = 0
	//
	// for (i = 0; i < 16; ++i) {
	//     b[i] = b[i + 8]
	// }
	// b[00] = b[08]
	// b[01] = b[09]
	// ...
	// b[15] = b[23]
	// Stop at the start of the last element so the read at (i + stride) stays
	// in bounds; 16 above is stride * (next - 1).
	for (i = arr->stride * index; i < arr->stride * (arr->next - 1); ++i)
	{
		buffer[i] = buffer[i + arr->stride];
	}
#else
	char *new_data = (char *)pk_new(arr->stride * arr->reserved, arr->alignment, arr->bkt);
	if (index > 0)
	{
		memcpy(new_data, arr->data, arr->stride * index);
	}
	memcpy(
		new_data + (arr->stride * index),
		((char *)arr->data) + (arr->stride * (index + 1)),
		arr->stride * (arr->next - index - 1));
	pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
	arr->data = (void *)new_data;
#endif
	arr->next -= 1;
}

void
pk_arr_clone(struct pk_arr *lhs, struct pk_arr *rhs)
{
	size_t sz;
	*rhs = *lhs;
	if (lhs->data == NULL)
		return;
	sz = lhs->stride * lhs->reserved;
	rhs->data = pk_new(sz, lhs->alignment, lhs->bkt);
	memcpy(rhs->data, lhs->data, sz);
}

void
pk_arr_swap(struct pk_arr *lhs, struct pk_arr *rhs)
{
	struct pk_arr tmp = *lhs;
	*lhs = *rhs;
	*rhs = tmp;
}

uint32_t
pk_arr_find_first_index(struct pk_arr *arr, void *user_data, pk_arr_item_compare *fn)
{
	uint32_t i;
	char *char_data = (char *)arr->data;
	for (i = 0; i < arr->next; ++i)
	{
		if (fn(user_data, char_data + (arr->stride * i)))
			return i;
	}
	return (uint32_t)-1;
}

bool
pk_arr_iter_begin(struct pk_arr *arr, struct pk_iter *it)
{
	it->data = nullptr;
	it->id.arr.i = 0;
	if (arr->next > 0 && arr->data != nullptr && arr->data != CAFE_BABE(void))
	{
		it->data = arr->data;
		return true;
	}
	return false;
}

bool
pk_arr_iter_end(struct pk_arr *arr, struct pk_iter *it)
{
	it->data = nullptr;
	it->id.arr.i = 0;
	if (arr->next > 0 && arr->data != nullptr && arr->data != CAFE_BABE(void))
	{
		it->id.arr.i = arr->next - 1;
		it->data = (void *)((char*)arr->data + (arr->stride * it->id.arr.i));
		return true;
	}
	return false;
}

bool
pk_arr_iter_increment(struct pk_arr *arr, struct pk_iter *it)
{
	if (it->id.arr.i + 1 >= arr->next)
	{
		return false;
	}
	it->id.arr.i += 1;
	it->data = (void *)((char*)arr->data + (arr->stride * it->id.arr.i));
	return true;
}

bool
pk_arr_iter_decrement(struct pk_arr *arr, struct pk_iter *it)
{
	if (it->id.arr.i == 0)
	{
		return false;
	}
	it->id.arr.i -= 1;
	it->data = (void *)((char*)arr->data + (arr->stride * it->id.arr.i));
	return true;
}
#endif /* PK_IMPL_ARR */
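/*******************************************************************************
 * Iteration sketch over a pk_arr with the pk_iter API above; begin/increment
 * return false once the range is exhausted, so the loop visits arr.next items.
 *
 * pk.h.arr.iter.example.c
 * ``` c
 * struct pk_iter it;
 * bool ok;
 * for (ok = pk_arr_iter_begin(&arr, &it); ok; ok = pk_arr_iter_increment(&arr, &it))
 * {
 *     int *item = (int *)it.data;
 *     printf("%u: %i\n", it.id.arr.i, *item);
 * }
 * ```
 ******************************************************************************/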
#ifndef PK_PK_STN_H
#define PK_PK_STN_H
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
enum PK_STN_RES
{
	PK_STN_RES_SUCCESS,
	PK_STN_RES_OVERFLOW,
	PK_STN_RES_UNDERFLOW,
	PK_STN_RES_INCONVERTIBLE
};
enum PK_STN_RES pk_stn_int64_t(int64_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_uint64_t(uint64_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_int32_t(int32_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_uint32_t(uint32_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_int16_t(int16_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_uint16_t(uint16_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_int8_t(int8_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_uint8_t(uint8_t *i, char const *s, char **pEnd, int base);
enum PK_STN_RES pk_stn_float(float *f, char const *s, char **pEnd);
enum PK_STN_RES pk_stn_double(double *d, char const *s, char **pEnd);
#if defined(__cplusplus)
#include <type_traits>
template <typename T>
enum PK_STN_RES
pk_stn(T *n, char const *s, char **pEnd, int base = 0)
{
	if constexpr(std::is_same<T, int64_t>::value) { return pk_stn_int64_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, uint64_t>::value) { return pk_stn_uint64_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, int32_t>::value) { return pk_stn_int32_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, uint32_t>::value) { return pk_stn_uint32_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, int16_t>::value) { return pk_stn_int16_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, uint16_t>::value) { return pk_stn_uint16_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, int8_t>::value) { return pk_stn_int8_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, uint8_t>::value) { return pk_stn_uint8_t(n, s, pEnd, base); }
	if constexpr(std::is_same<T, bool>::value)
	{
		static_assert(sizeof(bool) == sizeof(uint8_t));
		static_assert(alignof(bool) == alignof(uint8_t));
		return pk_stn_uint8_t((uint8_t*)n, s, pEnd, base);
	}
	if constexpr(std::is_same<T, float>::value) { return pk_stn_float(n, s, pEnd); }
	if constexpr(std::is_same<T, double>::value) { return pk_stn_double(n, s, pEnd); }
	return (PK_STN_RES)-1;
}
#endif /* defined(__cplusplus) */
#endif /* PK_PK_STN_H */
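/*******************************************************************************
 * pkstn usage sketch: the C entry points wrap strtol-family parsing with
 * explicit overflow/underflow/inconvertible results.
 *
 * pk.h.stn.example.c
 * ``` c
 * int32_t v;
 * char *end = NULL;
 * if (pk_stn_int32_t(&v, "123abc", &end, 10) == PK_STN_RES_SUCCESS)
 * {
 *     // v == 123, end points at "abc"
 * }
 * ```
 ******************************************************************************/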
#ifdef PK_IMPL_STN
enum PK_STN_RES
pk_stn_int64_t(int64_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	long long l;
	errno = 0;
	l = strtoll(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == LLONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	*i = l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_uint64_t(uint64_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	unsigned long long l;
	errno = 0;
	l = strtoull(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == ULLONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	*i = l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_int32_t(int32_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	long l;
	errno = 0;
	l = strtol(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == LONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	// long may be wider than the target type; clamp to the 32-bit range.
	if (l > INT32_MAX)
		return PK_STN_RES_OVERFLOW;
	if (l < INT32_MIN)
		return PK_STN_RES_UNDERFLOW;
	*i = (int32_t)l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_uint32_t(uint32_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	unsigned long l;
	errno = 0;
	l = strtoul(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == ULONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	if (l > UINT32_MAX)
		return PK_STN_RES_OVERFLOW;
	*i = (uint32_t)l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_int16_t(int16_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	long l;
	errno = 0;
	l = strtol(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == LONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	if (l > INT16_MAX)
		return PK_STN_RES_OVERFLOW;
	if (l < INT16_MIN)
		return PK_STN_RES_UNDERFLOW;
	*i = (int16_t)l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_uint16_t(uint16_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	unsigned long l;
	errno = 0;
	l = strtoul(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == ULONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	if (l > UINT16_MAX)
		return PK_STN_RES_OVERFLOW;
	*i = (uint16_t)l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_int8_t(int8_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	long l;
	errno = 0;
	l = strtol(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == LONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	if (l > INT8_MAX)
		return PK_STN_RES_OVERFLOW;
	if (l < INT8_MIN)
		return PK_STN_RES_UNDERFLOW;
	*i = (int8_t)l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_uint8_t(uint8_t *i, char const *s, char **pEnd, int base)
{
	char *end;
	unsigned long l;
	errno = 0;
	l = strtoul(s, &end, base);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE)
	{
		if (l == ULONG_MAX)
			return PK_STN_RES_OVERFLOW;
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	if (l > UINT8_MAX)
		return PK_STN_RES_OVERFLOW;
	*i = (uint8_t)l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_float(float *f, char const *s, char **pEnd)
{
	char *end;
	float l;
	errno = 0;
	l = strtof(s, &end);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE && l == HUGE_VALF)
	{
		return PK_STN_RES_OVERFLOW;
	}
	if (errno == ERANGE && l == -HUGE_VALF)
	{
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	*f = l;
	return PK_STN_RES_SUCCESS;
}

enum PK_STN_RES
pk_stn_double(double *d, char const *s, char **pEnd)
{
	char *end;
	double l;
	errno = 0;
	l = strtod(s, &end);
	if (pEnd != nullptr)
		*pEnd = end;
	if (errno == ERANGE && l == HUGE_VAL)
	{
		return PK_STN_RES_OVERFLOW;
	}
	if (errno == ERANGE && l == -HUGE_VAL)
	{
		return PK_STN_RES_UNDERFLOW;
	}
	if (s == end)
		return PK_STN_RES_INCONVERTIBLE;
	*d = l;
	return PK_STN_RES_SUCCESS;
}
#endif /* PK_IMPL_STN */

#ifndef PK_PKTMR_H
#define PK_PKTMR_H
#include <time.h>
/* 2024-12-17 JCB
 * I have read that in more recent Linux kernels, _MONOTONIC and _REALTIME
 * do not require syscalls, while all of the other calls can.
 * In testing on my personal machine, this seems to hold true. Using
 * CLOCK_PROCESS_CPUTIME_ID consistently elapsed thousands of nanoseconds,
 * even with no work between sequential _start() and _stop() calls.
 * Meanwhile, the same test with _MONOTONIC elapsed only tens of nanoseconds.
 */
/* struct pk_tmr */
struct pk_tmr
{
	struct timespec b; // begin
	struct timespec e; // end
};
#ifndef PK_TMR_CLOCK
#define PK_TMR_CLOCK CLOCK_MONOTONIC
#endif
#define pk_tmr_start(tmr) { clock_gettime(PK_TMR_CLOCK, &tmr.b); }
#define pk_tmr_stop(tmr) { clock_gettime(PK_TMR_CLOCK, &tmr.e); }
#define pk_tmr_duration_u64_nano(tmr) ((((unsigned long long int)tmr.e.tv_sec * 1000000000llu) + tmr.e.tv_nsec) - (((unsigned long long int)tmr.b.tv_sec * 1000000000llu) + (unsigned long long int)tmr.b.tv_nsec))
#define pk_tmr_duration_dbl_nano(tmr) ((1e+9 * tmr.e.tv_sec + tmr.e.tv_nsec) - (1e+9 * tmr.b.tv_sec + tmr.b.tv_nsec))
#define pk_tmr_duration_dbl_micro(tmr) ((1e+6 * tmr.e.tv_sec + 1e-3 * tmr.e.tv_nsec) - (1e+6 * tmr.b.tv_sec + 1e-3 * tmr.b.tv_nsec))
#define pk_tmr_duration_dbl_mili(tmr) ((1e+3 * tmr.e.tv_sec + 1e-6 * tmr.e.tv_nsec) - (1e+3 * tmr.b.tv_sec + 1e-6 * tmr.b.tv_nsec))
#define pk_tmr_duration_dbl_scnd(tmr) ((tmr.e.tv_sec + 1e-9 * tmr.e.tv_nsec) - (tmr.b.tv_sec + 1e-9 * tmr.b.tv_nsec))
#endif /* PK_PKTMR_H */
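/*******************************************************************************
 * pktmr usage sketch: start/stop wrap clock_gettime, and the duration macros
 * convert the two timespecs into the requested unit.
 *
 * pk.h.tmr.example.c
 * ``` c
 * struct pk_tmr tmr;
 * pk_tmr_start(tmr);
 * // ... work ...
 * pk_tmr_stop(tmr);
 * printf("%f ms\n", pk_tmr_duration_dbl_mili(tmr));
 * ```
 ******************************************************************************/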
#ifndef PK_UUID_H
#define PK_UUID_H
#include "stddef.h"
#include <stdalign.h>
#include <stdbool.h>
#include <time.h>
struct pk_uuid
{
	alignas(max_align_t) unsigned char uuid[16];
};
const struct pk_uuid pk_uuid_zed = { .uuid = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
const struct pk_uuid pk_uuid_max = { .uuid = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } };
#define pk_uuid_printf_format PK_Q(%.2x%.2x%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x%.2x%.2x%.2x%.2x)
#define pk_uuid_printf_var(id) id.uuid[0], id.uuid[1], id.uuid[2], id.uuid[3], id.uuid[4], id.uuid[5], id.uuid[6], id.uuid[7], id.uuid[8], id.uuid[9], id.uuid[10], id.uuid[11], id.uuid[12], id.uuid[13], id.uuid[14], id.uuid[15]
void pk_uuid_init(time_t srand_seed);
void pk_uuid_teardown();
struct pk_uuid pk_uuid_new_v7();
bool pk_uuid_equals(struct pk_uuid lhs, struct pk_uuid rhs);
bool pk_uuid_parse(const char *s, struct pk_uuid *uuid);
#if defined(__cplusplus)
#include <ostream>
#include <istream>
std::ostream& operator<<(std::ostream &o, const struct pk_uuid& uuid);
std::istream& operator>>(std::istream &i, struct pk_uuid& uuid);
const char* operator>>(const char *s, struct pk_uuid& uuid);
struct pk_uuid& operator<<(struct pk_uuid& uuid, const char *s);
bool operator==(const pk_uuid &lhs, const pk_uuid &rhs);
bool operator!=(const pk_uuid &lhs, const pk_uuid &rhs);
#endif
#endif /* PK_UUID_H */
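/*******************************************************************************
 * pkuuid usage sketch. PK_Q is assumed (from pkmacros.h) to quote its argument
 * into a string literal, so the format macro concatenates below.
 *
 * pk.h.uuid.example.c
 * ``` c
 * pk_uuid_init(time(NULL));
 * struct pk_uuid id = pk_uuid_new_v7();
 * printf(pk_uuid_printf_format "\n", pk_uuid_printf_var(id));
 * struct pk_uuid parsed;
 * if (pk_uuid_parse("0193e593-5d32-7000-8000-000000000000", &parsed)) { }
 * pk_uuid_teardown();
 * ```
 ******************************************************************************/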
#ifdef PK_IMPL_UUID
#include <stdlib.h>
#include <time.h>
// TODO JCB - 2025-03-19
// This should have platform-specific defines
#ifndef PK_UUID_CLOCK
#ifdef CLOCK_TAI
#define PK_UUID_CLOCK CLOCK_TAI
#else
#define PK_UUID_CLOCK CLOCK_REALTIME
#endif
#endif

void
pk_uuid_init(time_t srand_seed)
{
	// TODO 2025-03-19 - JCB
	// pk.h should NOT be setting srand.
	// Replace dependency on rand/srand with a sufficient rand() implementation.
	// I would prefer if generating a UUID did not advance a global random.
	// Consider creating a pkrand.h to resolve this.
	srand(srand_seed);
}

void
pk_uuid_teardown()
{
}

struct pk_uuid
pk_uuid_new_v7()
{
	const int n = 1;
	uint32_t r;
	// https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-version-7
	struct pk_uuid ret;
	struct timespec t;
	clock_gettime(PK_UUID_CLOCK, &t);
	uint32_t sec = (uint32_t)t.tv_sec;
	uint32_t nsec = (uint32_t)t.tv_nsec;
	// [000-047] (6 bytes) big-endian unix epoch
	// TODO test this on a big-endian machine, I don't think this is correct.
	// This `if` determines if we are big or little endian.
	// A return value of 1 says we are little endian, so swap the bytes.
	if (*(char *)&n == 1)
	{
		ret.uuid[0] = (uint8_t)((sec & 0xFF000000) >> 24);
		ret.uuid[1] = (uint8_t)((sec & 0x00FF0000) >> 16);
		ret.uuid[2] = (uint8_t)((sec & 0x0000FF00) >> 8);
		ret.uuid[3] = (uint8_t)((sec & 0x000000FF) >> 0);
		ret.uuid[4] = (uint8_t)((nsec & 0x0000FF00) >> 8);
		ret.uuid[5] = (uint8_t)((nsec & 0x000000FF) >> 0);
	}
	else
	{
		ret.uuid[0] = (uint8_t)((sec & 0xFF000000) >> 0);
		ret.uuid[1] = (uint8_t)((sec & 0x00FF0000) >> 8);
		ret.uuid[2] = (uint8_t)((sec & 0x0000FF00) >> 16);
		ret.uuid[3] = (uint8_t)((sec & 0x000000FF) >> 24);
		ret.uuid[4] = (uint8_t)((nsec & 0xFF000000) >> 0);
		ret.uuid[5] = (uint8_t)((nsec & 0x00FF0000) >> 8);
	}
	// [052-127] random
	r = (uint32_t)rand();
	if (*(char *)&n == 1)
	{
		ret.uuid[8] = (uint8_t)((r & 0xFF000000) >> 24);
		ret.uuid[9] = (uint8_t)((r & 0x00FF0000) >> 16);
		ret.uuid[10] = (uint8_t)((r & 0x0000FF00) >> 8);
		ret.uuid[11] = (uint8_t)((r & 0x000000FF) >> 0);
	}
	else
	{
		ret.uuid[8] = (uint8_t)((r & 0xFF000000) >> 0);
		ret.uuid[9] = (uint8_t)((r & 0x00FF0000) >> 8);
		ret.uuid[10] = (uint8_t)((r & 0x0000FF00) >> 16);
		ret.uuid[11] = (uint8_t)((r & 0x000000FF) >> 24);
	}
	r = rand();
	if (*(char *)&n == 1)
	{
		ret.uuid[12] = (uint8_t)((r & 0xFF000000) >> 24);
		ret.uuid[13] = (uint8_t)((r & 0x00FF0000) >> 16);
		ret.uuid[14] = (uint8_t)((r & 0x0000FF00) >> 8);
		ret.uuid[15] = (uint8_t)((r & 0x000000FF) >> 0);
	}
	else
	{
		ret.uuid[12] = (uint8_t)((r & 0xFF000000) >> 0);
		ret.uuid[13] = (uint8_t)((r & 0x00FF0000) >> 8);
		ret.uuid[14] = (uint8_t)((r & 0x0000FF00) >> 16);
		ret.uuid[15] = (uint8_t)((r & 0x000000FF) >> 24);
	}
	ret.uuid[6] = ret.uuid[9] ^ ret.uuid[12];
	ret.uuid[7] = ret.uuid[10] ^ ret.uuid[15];
	// [048-051] v7 nibble
	// version must be 0x7_
	// 0x70 is 0b01110000
	// 0x7F is 0b01111111
	ret.uuid[6] |= 0x70;
	ret.uuid[6] &= 0x7F;
	// [064-065] 2-bit variant field
	// variant must be 0b10
	// 0x80 is 0b10000000
	// 0xBF is 0b10111111
	ret.uuid[8] |= 0x80;
	ret.uuid[8] &= 0xBF;
	return ret;
}

bool
pk_uuid_equals(struct pk_uuid lhs, struct pk_uuid rhs)
{
	int i;
	for (i = 0; i < 16; ++i)
	{
		if (lhs.uuid[i] != rhs.uuid[i])
			return false;
	}
	return true;
}

bool
pk_uuid_parse(const char *s, struct pk_uuid *uuid)
{
	// ffffffff-ffff-ffff-ffff-ffffffffffff
	// 0        8    13   18   23         35
	char c[3] = {'\0','\0','\0'};
	unsigned char k, kk;
	if (s == nullptr)
		goto err_out;
	for (k = 0, kk = 0; k < 36; k+=2, ++kk)
	{
		if (s[k] == '\0' || s[k+1] == '\0')
			goto err_out;
		if (k == 8 || k == 13 || k == 18 || k == 23)
		{
			if (s[k] != '-')
				goto err_out;
			k -= 1;
			kk -= 1;
			continue;
		}
		c[0] = s[k];
		c[1] = s[k+1];
		if (pk_stn_uint8_t(&uuid->uuid[kk], c, nullptr, 16) != PK_STN_RES_SUCCESS)
		{
			goto err_out;
		}
	}
	return true;
err_out:
	*uuid = pk_uuid_zed;
	return false;
}

#if defined(__cplusplus)
#include <iomanip>
std::ostream&
operator<<(std::ostream &o, const struct pk_uuid& uuid)
{
	int i;
	std::ios_base::fmtflags orig_flags = o.flags();
	auto fill = o.fill();
	o << std::hex;
	for (i = 0; i < 4; ++i)
	{
		o << std::setw(2) << std::setfill('0');
		o << (uint16_t)uuid.uuid[i];
	}
	o << "-";
	for (i = 4; i < 6; ++i)
	{
		o << std::setw(2) << std::setfill('0');
		o << (uint16_t)uuid.uuid[i];
	}
	o << "-";
	for (i = 6; i < 8; ++i)
	{
		o << std::setw(2) << std::setfill('0');
		o << (uint16_t)uuid.uuid[i];
	}
	o << "-";
	for (i = 8; i < 10; ++i)
	{
		o << std::setw(2) << std::setfill('0');
		o << (uint16_t)uuid.uuid[i];
	}
	o << "-";
	for (i = 10; i < 16; ++i)
	{
		o << std::setw(2) << std::setfill('0');
		o << (uint16_t)uuid.uuid[i];
	}
	o.fill(fill);
	o.flags(orig_flags);
	return o;
}

std::istream&
operator>>(std::istream &i, struct pk_uuid& uuid)
{
	char u[36];
	i.read(u, 36);
	if (i.rdstate() & std::ios::failbit)
	{
		goto err_out;
	}
	else if (pk_uuid_parse(u, &uuid) == false)
	{
		goto err_out;
	}
	return i;
err_out:
	uuid = pk_uuid_zed;
	i.seekg(-36, std::ios_base::cur);
	i.setstate(std::ios::failbit);
	return i;
}

const char *
operator>>(const char *s, struct pk_uuid& uuid)
{
	if (pk_uuid_parse(s, &uuid))
	{
		return s+36;
	}
	return s;
}

struct pk_uuid&
operator<<(struct pk_uuid& uuid, const char *s)
{
	pk_uuid_parse(s, &uuid);
	return uuid;
}

bool
operator==(const pk_uuid &lhs, const pk_uuid &rhs)
{
	return pk_uuid_equals(lhs, rhs);
}

bool
operator!=(const pk_uuid &lhs, const pk_uuid &rhs)
{
	return !pk_uuid_equals(lhs, rhs);
}
#endif
#endif /* PK_IMPL_UUID */

#ifndef PK_PKBKTARR_H
#define PK_PKBKTARR_H
#ifndef PK_BKT_ARR_ALL_UNUSED_VAL
#define PK_BKT_ARR_ALL_UNUSED_VAL 0xFFFFFFFFFFFFFFFF
#endif
#define PK_BKT_ARR_HANDLE_B_MAX 0xFFFFFF
#define PK_BKT_ARR_HANDLE_I_MAX 64
typedef bool (pk_bkt_arr_compare_fn)(void *user_data, const void *user_obj_data, const void *arr_obj_data);
typedef void (pk_bkt_arr_iterate_fn)(void *user_data, void *arr_obj_data);
struct pk_bkt_arr_handle
{
	unsigned int b : 24;
	unsigned int i : 8;
};
#if !defined(__cplusplus)
#define pk_bkt_arr_handle_MAX ((struct pk_bkt_arr_handle){ .b = PK_BKT_ARR_HANDLE_B_MAX, .i = PK_BKT_ARR_HANDLE_I_MAX })
#else
#define pk_bkt_arr_handle_MAX (pk_bkt_arr_handle{ .b = PK_BKT_ARR_HANDLE_B_MAX, .i = PK_BKT_ARR_HANDLE_I_MAX })
constexpr struct pk_bkt_arr_handle pk_bkt_arr_handle_MAX_constexpr = pk_bkt_arr_handle_MAX;
inline constexpr bool operator==(const pk_bkt_arr_handle &lhs, const pk_bkt_arr_handle &rhs) { return lhs.b == rhs.b && lhs.i == rhs.i; }
#endif
struct pk_bkt_arr
{
	struct pk_membucket *bkt_buckets;
	struct pk_membucket *bkt_data;
	unsigned long long *idx_unused;
	void **bucketed_data;
	struct pk_bkt_arr_handle head_l;
	struct pk_bkt_arr_handle head_r;
	struct pk_bkt_arr_handle limits;
	unsigned int reserved_buckets;
	unsigned long stride;
	unsigned long alignment;
};
enum PK_BKT_ARR_HANDLE_VALIDATION : uint8_t
{
	PK_BKT_ARR_HANDLE_VALIDATION_VALID = 0,
	PK_BKT_ARR_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH = 1 << 0,
	PK_BKT_ARR_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH = 1 << 1,
};
enum PK_BKT_ARR_HANDLE_VALIDATION pk_bkt_arr_handle_validate(struct pk_bkt_arr *bkt_arr, struct pk_bkt_arr_handle handle);
void pk_bkt_arr_init(struct pk_bkt_arr *bkt_arr, unsigned long stride, unsigned long alignment, struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets, struct pk_membucket *bkt_data);
void pk_bkt_arr_clear(struct pk_bkt_arr *bkt_arr);
void pk_bkt_arr_reserve(struct pk_bkt_arr *bkt_arr, size_t count);
struct pk_bkt_arr_handle pk_bkt_arr_find_first_handle(struct pk_bkt_arr *bkt_arr, pk_bkt_arr_compare_fn fn, void *user_data, const void *user_obj_data);
void pk_bkt_arr_iterate(struct pk_bkt_arr *bkt_arr, pk_bkt_arr_iterate_fn fn, void *user_data);
void pk_bkt_arr_teardown(struct pk_bkt_arr *bkt_arr);
struct pk_bkt_arr_handle pk_bkt_arr_new_handle(struct pk_bkt_arr *bkt_arr);
void pk_bkt_arr_free_handle(struct pk_bkt_arr *bkt_arr, struct pk_bkt_arr_handle handle);
int pk_bkt_arr_handle_compare(struct pk_bkt_arr_handle lhs, struct pk_bkt_arr_handle rhs);
struct pk_bkt_arr_handle pk_bkt_arr_handle_increment(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h);
struct pk_bkt_arr_handle pk_bkt_arr_handle_decrement(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h);
bool pk_bkt_arr_iter_begin(struct pk_bkt_arr *arr, struct pk_iter *it);
bool pk_bkt_arr_iter_end(struct pk_bkt_arr *arr, struct pk_iter *it);
bool pk_bkt_arr_iter_increment(struct pk_bkt_arr *arr, struct pk_iter *it);
bool pk_bkt_arr_iter_decrement(struct pk_bkt_arr *arr, struct pk_iter *it);
#if defined (__cplusplus)
#include <cassert>
template <typename T>
struct pk_bkt_arr_t : public pk_bkt_arr
{
	pk_bkt_arr_t() = default;
	pk_bkt_arr_t(struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets, struct pk_membucket *bkt_data);
	~pk_bkt_arr_t() = default;
	T &operator[](struct pk_bkt_arr_handle);
	using FN_Iter = pk_tmpln_1;
	using FN_Find = pk_tmpln_2;
};
template <typename T>
pk_bkt_arr_t<T>::pk_bkt_arr_t(struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets, struct pk_membucket *bkt_data)
{
	pk_bkt_arr_init(this, sizeof(T), alignof(T), limits, bkt_buckets, bkt_data);
}
template <typename T>
T &pk_bkt_arr_t<T>::operator[](struct pk_bkt_arr_handle handle)
{
	assert(this->idx_unused != nullptr);
	assert(this->bucketed_data != nullptr);
	assert(handle.b <= this->limits.b);
	assert(handle.i <= this->limits.i);
	assert(handle.b != this->head_r.b || handle.i <= this->head_r.i);
	T** two_star_programmer = reinterpret_cast<T**>(this->bucketed_data);
	return two_star_programmer[handle.b][handle.i];
}
#endif
#endif /* PK_PKBKTARR_H */
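/*******************************************************************************
 * pkbktarr usage sketch in C. `bkt_buckets` and `bkt_data` are assumed to be
 * pk_membuckets created elsewhere; limits.i may not exceed 64 (one uint64_t of
 * unused-flags per bucket).
 *
 * pk.h.bktarr.example.c
 * ``` c
 * struct pk_bkt_arr arr;
 * struct pk_bkt_arr_handle limits = { .b = 16, .i = 64 };
 * pk_bkt_arr_init(&arr, sizeof(int), alignof(int), limits, bkt_buckets, bkt_data);
 * struct pk_bkt_arr_handle h = pk_bkt_arr_new_handle(&arr);
 * int *slot = (int *)((char *)arr.bucketed_data[h.b] + (arr.stride * h.i));
 * *slot = 7;
 * pk_bkt_arr_free_handle(&arr, h);
 * pk_bkt_arr_teardown(&arr);
 * ```
 ******************************************************************************/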
#ifdef PK_IMPL_BKTARR
#include <assert.h>
#include <string.h>

enum PK_BKT_ARR_HANDLE_VALIDATION
pk_bkt_arr_handle_validate(struct pk_bkt_arr *bkt_arr, struct pk_bkt_arr_handle handle)
{
	assert(bkt_arr != NULL);
	uint8_t ret = 0;
	if (handle.b >= bkt_arr->reserved_buckets || handle.b >= bkt_arr->limits.b)
	{
		ret |= PK_BKT_ARR_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH;
	}
	if (handle.i >= bkt_arr->limits.i)
	{
		ret |= PK_BKT_ARR_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
	}
	if (handle.b == bkt_arr->head_r.b && handle.i > bkt_arr->head_r.i)
	{
		ret |= PK_BKT_ARR_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
	}
	return (enum PK_BKT_ARR_HANDLE_VALIDATION)ret;
}

void
pk_bkt_arr_init(struct pk_bkt_arr *bkt_arr, unsigned long stride, unsigned long alignment, struct pk_bkt_arr_handle limits, struct pk_membucket *bkt_buckets, struct pk_membucket *bkt_data)
{
	assert(limits.b <= PK_BKT_ARR_HANDLE_B_MAX);
	assert(limits.i <= PK_BKT_ARR_HANDLE_I_MAX);
	assert(bkt_buckets != nullptr);
	assert(bkt_data != nullptr);
	assert(bkt_arr != nullptr);
	memset(bkt_arr, 0, sizeof(struct pk_bkt_arr));
	bkt_arr->bkt_buckets = bkt_buckets;
	bkt_arr->bkt_data = bkt_data;
	bkt_arr->head_l.b = 0ul;
	bkt_arr->head_l.i = 0ul;
	bkt_arr->head_r.b = 0ul;
	bkt_arr->head_r.i = 0ul;
	bkt_arr->limits = limits;
	bkt_arr->reserved_buckets = 1;
	bkt_arr->stride = stride;
	bkt_arr->alignment = alignment;
	bkt_arr->idx_unused = (unsigned long long *)pk_new_bkt(sizeof(unsigned long long), alignof(unsigned long long), bkt_buckets);
	bkt_arr->idx_unused[0] = PK_BKT_ARR_ALL_UNUSED_VAL;
	bkt_arr->bucketed_data = (void **)pk_new_bkt(sizeof(void *), alignof(void *), bkt_buckets);
	bkt_arr->bucketed_data[0] = pk_new_bkt(stride * limits.i, alignment, bkt_data);
}

void
pk_bkt_arr_clear(struct pk_bkt_arr *bkt_arr)
{
	unsigned int b;
	bkt_arr->head_l.b = 0;
	bkt_arr->head_l.i = 0;
	bkt_arr->head_r.b = 0;
	bkt_arr->head_r.i = 0;
	for (b = 0; b < bkt_arr->reserved_buckets; ++b)
	{
		bkt_arr->idx_unused[b] = PK_BKT_ARR_ALL_UNUSED_VAL;
	}
}

void
pk_bkt_arr_reserve(struct pk_bkt_arr *bkt_arr, size_t count)
{
	// Round up so `count` items actually fit.
	size_t bucket_count = (count + bkt_arr->limits.i - 1) / bkt_arr->limits.i;
	if (bkt_arr->reserved_buckets >= bucket_count)
		return;
	unsigned long long *new_idx_unused = (unsigned long long *)pk_new_bkt(sizeof(unsigned long long) * bucket_count, alignof(uint64_t), bkt_arr->bkt_buckets);
	void **new_bucketed_data = (void **)pk_new_bkt(sizeof(void *) * bucket_count, alignof(void *), bkt_arr->bkt_buckets);
	if (bkt_arr->reserved_buckets > 0)
	{
		memcpy(new_idx_unused, bkt_arr->idx_unused, sizeof(unsigned long long) * bkt_arr->reserved_buckets);
		memcpy(new_bucketed_data, bkt_arr->bucketed_data, sizeof(void *) * bkt_arr->reserved_buckets);
		pk_delete_bkt(bkt_arr->bucketed_data, sizeof(void *) * bkt_arr->reserved_buckets, bkt_arr->bkt_buckets);
		pk_delete_bkt(bkt_arr->idx_unused, sizeof(unsigned long long) * bkt_arr->reserved_buckets, bkt_arr->bkt_buckets);
	}
	for (size_t i = bkt_arr->reserved_buckets; i < bucket_count; ++i)
	{
		new_idx_unused[i] = PK_BKT_ARR_ALL_UNUSED_VAL;
		new_bucketed_data[i] = pk_new_bkt(bkt_arr->stride * bkt_arr->limits.i, bkt_arr->alignment, bkt_arr->bkt_data);
	}
	bkt_arr->idx_unused = new_idx_unused;
	bkt_arr->bucketed_data = new_bucketed_data;
	bkt_arr->reserved_buckets = bucket_count;
}
struct pk_bkt_arr_handle
pk_bkt_arr_find_first_handle(struct pk_bkt_arr *bkt_arr, pk_bkt_arr_compare_fn fn, void *user_data, const void *user_obj_data)
{
	assert(bkt_arr != NULL);
	assert(fn != NULL);
	struct pk_bkt_arr_handle ret;
	unsigned int b, i, ii;
	ret.b = PK_BKT_ARR_HANDLE_B_MAX;
	ret.i = PK_BKT_ARR_HANDLE_I_MAX;
	for (b = 0; b < bkt_arr->reserved_buckets; ++b)
	{
		char *arr = ((char**)(bkt_arr->bucketed_data))[b];
		ii = b == bkt_arr->reserved_buckets-1 ? bkt_arr->head_r.i : bkt_arr->limits.i;
		for (i = 0; i < ii; ++i)
		{
			if (PK_HAS_FLAG(bkt_arr->idx_unused[b], 1ull << i))
			{
				continue;
			}
			if (fn(user_data, user_obj_data, arr+(bkt_arr->stride * i)))
			{
				ret.b = b;
				ret.i = i;
				return ret;
			}
		}
	}
	return ret;
}

void
pk_bkt_arr_iterate(struct pk_bkt_arr *bkt_arr, pk_bkt_arr_iterate_fn fn, void *user_data)
{
	assert(bkt_arr != NULL);
	assert(fn != NULL);
	unsigned int b, i, ii;
	for (b = 0; b < bkt_arr->reserved_buckets; ++b)
	{
		char *arr = ((char**)(bkt_arr->bucketed_data))[b];
		ii = b == bkt_arr->head_r.b ? bkt_arr->head_r.i : bkt_arr->limits.i;
		for (i = 0; i < ii; ++i)
		{
			if (PK_HAS_FLAG(bkt_arr->idx_unused[b], 1ull << i))
			{
				continue;
			}
			fn(user_data, arr+(bkt_arr->stride * i));
		}
	}
}

void
pk_bkt_arr_teardown(struct pk_bkt_arr *bkt_arr)
{
	int b;
	size_t sz = bkt_arr->limits.i * bkt_arr->stride;
	if (bkt_arr->idx_unused == nullptr && bkt_arr->bucketed_data == nullptr)
		return;
	for (b = bkt_arr->reserved_buckets - 1; b > -1; --b)
	{
		pk_delete_bkt(bkt_arr->bucketed_data[b], sz, bkt_arr->bkt_data);
	}
	pk_delete_bkt((void *)bkt_arr->idx_unused, sizeof(unsigned long long) * (bkt_arr->reserved_buckets), bkt_arr->bkt_buckets);
	pk_delete_bkt((void *)bkt_arr->bucketed_data, sizeof(void *) * (bkt_arr->reserved_buckets), bkt_arr->bkt_buckets);
	memset(bkt_arr, 0, sizeof(struct pk_bkt_arr));
	bkt_arr->bkt_buckets = NULL;
	bkt_arr->bkt_data = NULL;
	bkt_arr->idx_unused = NULL;
	bkt_arr->bucketed_data = NULL;
}
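/*******************************************************************************
 * pk_bkt_arr_find_first_handle sketch: the compare fn receives the caller's
 * user_data/user_obj_data plus each live item; a miss returns a handle equal
 * to pk_bkt_arr_handle_MAX.
 *
 * pk.h.bktarr.find.example.c
 * ``` c
 * static bool
 * int_equals(void *user_data, const void *user_obj_data, const void *arr_obj_data)
 * {
 *     (void)user_data;
 *     return *(const int *)user_obj_data == *(const int *)arr_obj_data;
 * }
 *
 * // int needle = 7;
 * // struct pk_bkt_arr_handle h = pk_bkt_arr_find_first_handle(&arr, &int_equals, NULL, &needle);
 * // bool found = pk_bkt_arr_handle_compare(h, pk_bkt_arr_handle_MAX) != 0;
 * ```
 ******************************************************************************/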
struct pk_bkt_arr_handle
pk_bkt_arr_new_handle(struct pk_bkt_arr *bkt_arr)
{
	struct pk_bkt_arr_handle ret;
	unsigned int b, i, ii;
	assert(bkt_arr != nullptr);
	// if we have an existing open slot
	if (pk_bkt_arr_handle_compare(bkt_arr->head_l, bkt_arr->head_r) != 0)
	{
		ret = bkt_arr->head_l;
		for (b = bkt_arr->head_l.b; b < bkt_arr->reserved_buckets; ++b)
		{
			if (bkt_arr->idx_unused[b] == 0ull)
				continue;
			// I feel like you could do a binary search here, but for 64 elements is it worth it?
			i = bkt_arr->head_l.b == b ? bkt_arr->head_l.i + 1 : 0;
			ii = bkt_arr->head_r.b == b ? bkt_arr->head_r.i : PK_MIN(64, bkt_arr->limits.i);
			for (; i < ii; ++i)
			{
				if (bkt_arr->idx_unused[b] & (1ull << i))
				{
					bkt_arr->head_l.b = b;
					bkt_arr->head_l.i = i;
					goto done;
				}
			}
		}
		bkt_arr->head_l = bkt_arr->head_r;
		goto done;
	}
	if (pk_bkt_arr_handle_compare(pk_bkt_arr_handle_increment(bkt_arr, bkt_arr->head_l), bkt_arr->head_l) == 0
		&& bkt_arr->reserved_buckets == bkt_arr->limits.b
		&& bkt_arr->idx_unused[bkt_arr->head_r.b] == 0)
	{
		PK_LOGV_ERR("[pk_bkt_arr_new_handle] Exceeded bucket limits!: b:%u i:%u\n", bkt_arr->limits.b, bkt_arr->limits.i);
		exit(1);
	}
	if (bkt_arr->head_r.b == bkt_arr->reserved_buckets && bkt_arr->head_r.i == 0)
	{
		bkt_arr->reserved_buckets += 1;
		unsigned long long *new_idx_unused = (unsigned long long *)pk_new_bkt(sizeof(unsigned long long) * bkt_arr->reserved_buckets, alignof(unsigned long long), bkt_arr->bkt_buckets);
		void **new_data_ptrs = (void **)pk_new_bkt(sizeof(void *) * bkt_arr->reserved_buckets, alignof(void *), bkt_arr->bkt_buckets);
		for (b = 0; b < bkt_arr->reserved_buckets - 1; ++b)
		{
			new_idx_unused[b] = bkt_arr->idx_unused[b];
			new_data_ptrs[b] = bkt_arr->bucketed_data[b];
		}
		new_idx_unused[bkt_arr->reserved_buckets - 1] = PK_BKT_ARR_ALL_UNUSED_VAL;
		new_data_ptrs[bkt_arr->reserved_buckets - 1] = pk_new_bkt(bkt_arr->stride * bkt_arr->limits.i, bkt_arr->alignment, bkt_arr->bkt_data);
		pk_delete_bkt((void *)bkt_arr->idx_unused, sizeof(unsigned long long) * (bkt_arr->reserved_buckets - 1), bkt_arr->bkt_buckets);
		pk_delete_bkt((void *)bkt_arr->bucketed_data, sizeof(void *) * (bkt_arr->reserved_buckets - 1), bkt_arr->bkt_buckets);
		bkt_arr->idx_unused = new_idx_unused;
		bkt_arr->bucketed_data = new_data_ptrs;
	}
	ret = bkt_arr->head_r;
	bkt_arr->head_r = pk_bkt_arr_handle_increment(bkt_arr, bkt_arr->head_r);
	bkt_arr->head_l = pk_bkt_arr_handle_increment(bkt_arr, bkt_arr->head_l);
done:
	bkt_arr->idx_unused[ret.b] &= ~(1ull << ret.i);
	return ret;
}

void
pk_bkt_arr_free_handle(struct pk_bkt_arr *bkt_arr, struct pk_bkt_arr_handle handle)
{
	assert(bkt_arr != nullptr);
	assert(pk_bkt_arr_handle_validate(bkt_arr, handle) == PK_BKT_ARR_HANDLE_VALIDATION_VALID);
	bkt_arr->idx_unused[handle.b] |= (1ull << handle.i);
	if (handle.b < bkt_arr->head_l.b || (handle.b == bkt_arr->head_l.b && handle.i < bkt_arr->head_l.i))
	{
		bkt_arr->head_l = handle;
		return;
	}
}

int
pk_bkt_arr_handle_compare(struct pk_bkt_arr_handle lhs, struct pk_bkt_arr_handle rhs)
{
	if (lhs.b == rhs.b && lhs.i == rhs.i)
		return 0;
	if (lhs.b == rhs.b)
		return (int)rhs.i - (int)lhs.i;
	return (int)rhs.b - (int)lhs.b;
}

struct pk_bkt_arr_handle
pk_bkt_arr_handle_increment(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h)
{
	h.i += 1;
	if (arr->limits.i == h.i)
	{
		if (h.b + 1 < arr->limits.b)
		{
			h.b += 1;
			h.i = 0;
		}
		else
		{
			h.i -= 1;
		}
	}
	return h;
}

struct pk_bkt_arr_handle
pk_bkt_arr_handle_decrement(struct pk_bkt_arr *arr, struct pk_bkt_arr_handle h)
{
	if (h.i == 0)
	{
		if (h.b != 0)
		{
			h.b -= 1;
			h.i = arr->limits.i;
		}
		else
		{
			return h;
		}
	}
	h.i -= 1;
	return h;
}

bool
pk_bkt_arr_iter_begin(struct pk_bkt_arr *arr, struct pk_iter *it)
{
	it->data = nullptr;
	it->id.bkt.b = 0;
	it->id.bkt.i = 0;
	if (arr->head_l.b == 0 && arr->head_l.i == 0 && (arr->head_l.b != arr->head_r.b || arr->head_l.i != arr->head_r.i))
	{
		return pk_bkt_arr_iter_increment(arr, it);
	}
	if ((arr->idx_unused[it->id.bkt.b] & (1ull << it->id.bkt.i)) != 0)
		return false;
	it->data = (char*)(arr->bucketed_data[it->id.bkt.b]) + (arr->stride * it->id.bkt.i);
	return true;
}
bool
pk_bkt_arr_iter_end(struct pk_bkt_arr *arr, struct pk_iter *it)
{
	it->data = nullptr;
	it->id.bkt.b = 0;
	it->id.bkt.i = 0;
	if (arr->head_r.b == 0 && arr->head_r.i == 0)
		return false;
	struct pk_bkt_arr_handle handle = arr->head_r;
	for (;;)
	{
		if ((arr->idx_unused[handle.b] & (1ull << handle.i)) == 0)
			break;
		if (handle.b == 0 && handle.i == 0)
			return false;
		handle = pk_bkt_arr_handle_decrement(arr, handle);
	}
	it->id.bkt.b = handle.b;
	it->id.bkt.i = handle.i;
	if (arr->bucketed_data != nullptr && arr->bucketed_data[it->id.bkt.b] != nullptr)
	{
		it->data = (char*)(arr->bucketed_data[it->id.bkt.b]) + (arr->stride * it->id.bkt.i);
		return true;
	}
	return false;
}

bool
pk_bkt_arr_iter_increment(struct pk_bkt_arr *arr, struct pk_iter *it)
{
	struct pk_bkt_arr_handle handle = {
		.b = it->id.bkt.b,
		.i = it->id.bkt.i,
	};
	if (it->id.bkt.b == arr->limits.b-1 && it->id.bkt.i == arr->limits.i-1)
		return false;
	for (;;)
	{
		handle = pk_bkt_arr_handle_increment(arr, handle);
		if (handle.b >= arr->reserved_buckets)
			return false;
		if ((arr->idx_unused[handle.b] & (1ull << handle.i)) == 0)
			break;
	}
	it->id.bkt.b = handle.b;
	it->id.bkt.i = handle.i;
	if ((arr->idx_unused[it->id.bkt.b] & (1ull << it->id.bkt.i)) != 0)
		return false;
	it->data = (char*)(arr->bucketed_data[it->id.bkt.b]) + (arr->stride * it->id.bkt.i);
	return true;
}

bool
pk_bkt_arr_iter_decrement(struct pk_bkt_arr *arr, struct pk_iter *it)
{
	struct pk_bkt_arr_handle handle = {
		.b = it->id.bkt.b,
		.i = it->id.bkt.i,
	};
	for (;;)
	{
		handle = pk_bkt_arr_handle_decrement(arr, handle);
		if ((arr->idx_unused[handle.b] & (1ull << handle.i)) == 0)
			break;
		if (handle.b == 0 && handle.i == 0)
			break;
	}
	if (it->id.bkt.b == handle.b && it->id.bkt.i == handle.i)
		return false;
	it->id.bkt.b = handle.b;
	it->id.bkt.i = handle.i;
	if ((arr->idx_unused[it->id.bkt.b] & (1ull << it->id.bkt.i)) != 0)
		return false;
	it->data = ((char*)(arr->bucketed_data[it->id.bkt.b])) + (arr->stride * it->id.bkt.i);
	return true;
}
#endif /* PK_IMPL_BKTARR */

#ifndef PK_PKFUNCINSTR_H
#define PK_PKFUNCINSTR_H
#include <stdio.h>
struct pk_funcinstr;
struct pk_funcinstr
{
	void *fn;
	struct pk_tmr tmr;
	struct pk_funcinstr *parent;
	struct pk_funcinstr *first_child;
	struct pk_funcinstr **children;
	size_t n_children;
	size_t r_children;
};
void pk_funcinstr_init();
void pk_funcinstr_set_outputs(FILE *out, FILE *err);
void pk_funcinstr_write(FILE *f);
void pk_funcinstr_teardown();
#if defined(__cplusplus)
extern "C" {
#endif
#if defined(__clang__)
// clang
#elif defined(__GNUC__) || defined(__GNUG__)
#ifndef __USE_GNU
#define __USE_GNU
#endif
#if defined(__cplusplus)
#include <cxxabi.h>
#endif
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
void __cyg_profile_func_enter(void* this_fn, void* call_site);
void __cyg_profile_func_exit(void* this_fn, void* call_site);
#else
// other
#endif
#if defined(__cplusplus)
} // extern "C"
#endif
#endif /* PK_PKFUNCINSTR_H */
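/*******************************************************************************
 * pkfuncinstr usage sketch: the __cyg_profile hooks only fire when the
 * translation units are compiled with instrumentation, e.g. (GCC, indicative
 * flags only):
 *   gcc -finstrument-functions -rdynamic main.c -ldl
 * -rdynamic/-ldl let dladdr resolve symbol names in the report.
 *
 * pk.h.funcinstr.example.c
 * ``` c
 * pk_funcinstr_init();
 * // ... run instrumented code ...
 * pk_funcinstr_write(stdout); // trace-event-style JSON array
 * pk_funcinstr_teardown();
 * ```
 ******************************************************************************/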
#if defined(PK_IMPL_FUNCINSTR)
#include <assert.h>
#include <stdalign.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>
#define PK_FUNCINSTR_CHILDREN_START_COUNT 8
#define PK_FUNCINSTR_CHILDREN_GROW_RATIO 2.0
#define PK_FUNCINSTR_BKT_START_COUNT 64
#define PK_FUNCINSTR_BKT_GROW_RATIO 2.0
#define PK_FUNCINSTR_BKT_DATA_COUNT 0xFFFF
struct pk_funcinstr_bkt
{
	uint16_t used_count;
	uint8_t guard_enter;
	uint8_t guard_exit;
	struct timespec reset_time;
	struct pk_funcinstr data[PK_FUNCINSTR_BKT_DATA_COUNT+1];
};
struct pk_funcinstr_mstr
{
	mtx_t mtx;
	FILE *out;
	FILE *err;
	struct timespec reset_time;
	struct pk_funcinstr_bkt **buckets;
	size_t r_buckets;
	size_t n_buckets;
};
// if NULL, get a new bucket (or alloc if full). if !NULL, existing thread bucket.
static thread_local struct pk_funcinstr_bkt *pk_funcinstr_thrd_bkt = NULL;
// last function call (should be NULL or parent of current)
static thread_local struct pk_funcinstr *pk_funcinstr_thrd_instr = NULL;
static struct pk_funcinstr_mstr thrd_mstr;

__attribute__((no_instrument_function)) void
pk_funcinstr_init()
{
	assert(thrd_mstr.out == NULL);
	assert(thrd_mstr.err == NULL);
	assert(thrd_mstr.reset_time.tv_sec == 0);
	assert(thrd_mstr.reset_time.tv_nsec == 0);
	assert(thrd_mstr.buckets == NULL);
	assert(thrd_mstr.r_buckets == 0);
	assert(thrd_mstr.n_buckets == 0);
	mtx_init(&thrd_mstr.mtx, mtx_plain);
	thrd_mstr.out = stdout;
	thrd_mstr.err = stderr;
	thrd_mstr.r_buckets = PK_FUNCINSTR_BKT_START_COUNT;
	thrd_mstr.buckets = (struct pk_funcinstr_bkt**)aligned_alloc(alignof(struct pk_funcinstr_bkt *), (sizeof(struct pk_funcinstr_bkt *) * PK_FUNCINSTR_BKT_START_COUNT));
	clock_gettime(PK_TMR_CLOCK, &thrd_mstr.reset_time);
}

__attribute__((no_instrument_function)) void
pk_funcinstr_set_outputs(FILE *out, FILE *err)
{
	thrd_mstr.out = out;
	thrd_mstr.err = err;
}

__attribute__((no_instrument_function)) void
pk_funcinstr_write(FILE *f)
{
	int64_t i, k, s;
	struct pk_funcinstr_bkt *bkt = nullptr;
	struct pk_funcinstr *instr = nullptr;
	struct pk_tmr fake_tmr;
	Dl_info info;
	mtx_lock(&thrd_mstr.mtx);
	fake_tmr.b = thrd_mstr.reset_time;
	fprintf(f, "[");
	for (i = 0; i < (int64_t)thrd_mstr.n_buckets; ++i)
	{
		bkt = thrd_mstr.buckets[i];
		for (k = 0; k < (int64_t)bkt->used_count; ++k)
		{
			instr = &bkt->data[k];
			for (s = 0; s < 2; ++s)
			{
				if (i == 0 && k == 0 && s == 0)
				{
					fprintf(f, "{");
				}
				else
				{
					fprintf(f, ",{");
				}
				if (dladdr(instr->fn, &info) != 0)
				{
					fprintf(f, "\"name\": \"%s\",", info.dli_sname);
				}
				else
				{
					fprintf(f, "\"name\": \"unknown\",");
				}
				fprintf(f, "\"cat\": \"%s\",", "funcinstr");
				if (s == 0)
				{
					fake_tmr.e = instr->tmr.b;
					fprintf(f, "\"ph\": \"%c\",", 'B');
				}
				else
				{
					fake_tmr.e = instr->tmr.e;
					fprintf(f, "\"ph\": \"%c\",", 'E');
				}
				fprintf(f, "\"ts\": %llu,", pk_tmr_duration_u64_nano(fake_tmr));
				fprintf(f, "\"pid\": %i,", 69);
				fprintf(f, "\"tid\": %lu", (unsigned long)thrd_current());
				fprintf(f, "}");
			}
		}
	}
	fprintf(f, "]");
	mtx_unlock(&thrd_mstr.mtx);
}

__attribute__((no_instrument_function)) void
pk_funcinstr_teardown()
{
	int64_t i, k;
	mtx_lock(&thrd_mstr.mtx);
	// Note: the pk_funcinstr_bkt allocations themselves are deliberately not
	// freed here; other threads may still reference them through
	// pk_funcinstr_thrd_bkt, and the hard-reset path in
	// pk_funcinstr_detect_and_handle_reset releases a stale bucket the next
	// time that thread is instrumented.
	for (i = ((int64_t)thrd_mstr.n_buckets)-1; i > -1; --i)
	{
		struct pk_funcinstr_bkt *bkt = thrd_mstr.buckets[i];
		for (k = ((int64_t)bkt->used_count)-1; k > -1; --k)
		{
			free(bkt->data[k].children);
		}
	}
	free(thrd_mstr.buckets);
	thrd_mstr.out = NULL;
	thrd_mstr.err = NULL;
	thrd_mstr.reset_time.tv_sec = 0;
	thrd_mstr.reset_time.tv_nsec = 0;
	thrd_mstr.buckets = NULL;
	thrd_mstr.r_buckets = 0;
	thrd_mstr.n_buckets = 0;
	mtx_unlock(&thrd_mstr.mtx);
	mtx_destroy(&thrd_mstr.mtx);
}

#if defined(__clang__)
// TODO clang XRay
// Come up with pk macros since XRay requires attributes to instrument?
#elif defined(__GNUC__) || defined(__GNUG__)
__attribute__((no_instrument_function)) bool
pk_funcinstr_detect_not_initialized()
{
	if (thrd_mstr.buckets == NULL)
		return true;
	if (thrd_mstr.r_buckets == 0)
		return true;
	return false;
}

__attribute__((no_instrument_function)) void
pk_funcinstr_detect_and_handle_reset()
{
	bool should_hard_reset = false;
	bool should_reset = pk_funcinstr_thrd_bkt == NULL;
	if (pk_funcinstr_thrd_bkt != NULL)
	{
		should_reset = pk_funcinstr_thrd_bkt->used_count == PK_FUNCINSTR_BKT_DATA_COUNT;
		should_hard_reset = thrd_mstr.reset_time.tv_sec > pk_funcinstr_thrd_bkt->reset_time.tv_sec;
		should_hard_reset = should_hard_reset || (thrd_mstr.reset_time.tv_sec == pk_funcinstr_thrd_bkt->reset_time.tv_sec && thrd_mstr.reset_time.tv_nsec > pk_funcinstr_thrd_bkt->reset_time.tv_nsec);
	}
	if (should_hard_reset)
	{
		if (pk_funcinstr_thrd_bkt != NULL)
			free(pk_funcinstr_thrd_bkt);
		pk_funcinstr_thrd_bkt = NULL;
		pk_funcinstr_thrd_instr = NULL;
		should_reset = true;
	}
	if (should_reset)
	{
		if (thrd_mstr.n_buckets == thrd_mstr.r_buckets)
		{
			mtx_lock(&thrd_mstr.mtx);
			thrd_mstr.r_buckets *= PK_FUNCINSTR_BKT_GROW_RATIO;
			struct pk_funcinstr_bkt **buckets = (struct pk_funcinstr_bkt**)aligned_alloc(alignof(void *), sizeof(void *) * thrd_mstr.r_buckets);
			memcpy(buckets, thrd_mstr.buckets, sizeof(void *) * (thrd_mstr.n_buckets));
			free(thrd_mstr.buckets);
			thrd_mstr.buckets = buckets;
			mtx_unlock(&thrd_mstr.mtx);
		}
		struct pk_funcinstr_bkt *bkt = (struct pk_funcinstr_bkt *)aligned_alloc(alignof(struct pk_funcinstr_bkt), sizeof(struct pk_funcinstr_bkt));
		bkt->used_count = 0;
		bkt->guard_enter = 0;
		bkt->guard_exit = 0;
		bkt->reset_time.tv_sec = 0;
		bkt->reset_time.tv_nsec = 0;
		if (pk_funcinstr_thrd_bkt != NULL)
		{
			pk_funcinstr_thrd_bkt->guard_enter = 0;
			pk_funcinstr_thrd_bkt->guard_exit = 0;
		}
		pk_funcinstr_thrd_bkt = bkt;
		mtx_lock(&thrd_mstr.mtx);
		thrd_mstr.buckets[thrd_mstr.n_buckets++] = bkt;
		mtx_unlock(&thrd_mstr.mtx);
		clock_gettime(PK_TMR_CLOCK, &pk_funcinstr_thrd_bkt->reset_time);
	}
}

__attribute__((no_instrument_function)) bool
pk_funcinstr_should_early_exit()
{
	if (pk_funcinstr_thrd_bkt->guard_enter != 0)
		return true;
	if (pk_funcinstr_thrd_bkt->guard_exit != 0)
		return true;
	return false;
}
__attribute__((no_instrument_function)) struct pk_funcinstr *
pk_funcinstr_create_funcinstr(void *this_fn)
{
	struct pk_funcinstr *funcinstr = &pk_funcinstr_thrd_bkt->data[pk_funcinstr_thrd_bkt->used_count];
	pk_funcinstr_thrd_bkt->used_count++;
	funcinstr->fn = this_fn;
	pk_tmr_start(funcinstr->tmr);
	funcinstr->parent = pk_funcinstr_thrd_instr;
	funcinstr->first_child = NULL;
	funcinstr->children = NULL;
	funcinstr->n_children = 0;
	funcinstr->r_children = 0;
	if (pk_funcinstr_thrd_instr != NULL)
	{
		if (pk_funcinstr_thrd_instr->first_child == NULL)
		{
			// avoid a malloc if n_children will only == 1
			pk_funcinstr_thrd_instr->first_child = funcinstr;
		}
		else
		{
			if (pk_funcinstr_thrd_instr->n_children == pk_funcinstr_thrd_instr->r_children)
			{
				if (pk_funcinstr_thrd_instr->r_children == 0)
				{
					pk_funcinstr_thrd_instr->r_children = PK_FUNCINSTR_CHILDREN_START_COUNT;
				}
				else
				{
					pk_funcinstr_thrd_instr->r_children *= PK_FUNCINSTR_CHILDREN_GROW_RATIO;
				}
				struct pk_funcinstr **children = (struct pk_funcinstr **)aligned_alloc(alignof(void *), sizeof(void *) * pk_funcinstr_thrd_instr->r_children);
				if (pk_funcinstr_thrd_instr->children != NULL)
				{
					memcpy(children, pk_funcinstr_thrd_instr->children, sizeof(void *) * pk_funcinstr_thrd_instr->n_children);
					free(pk_funcinstr_thrd_instr->children);
				}
				pk_funcinstr_thrd_instr->children = children;
				if (pk_funcinstr_thrd_instr->n_children == 0)
				{
					pk_funcinstr_thrd_instr->children[0] = pk_funcinstr_thrd_instr->first_child;
					pk_funcinstr_thrd_instr->n_children++;
				}
			}
			pk_funcinstr_thrd_instr->children[pk_funcinstr_thrd_instr->n_children] = funcinstr;
			pk_funcinstr_thrd_instr->n_children++;
		}
	}
	return funcinstr;
}

__attribute__((no_instrument_function)) void
__cyg_profile_func_enter(void* this_fn, void* call_site)
{
	(void)call_site;
	if (pk_funcinstr_detect_not_initialized())
		return;
	pk_funcinstr_detect_and_handle_reset();
	if (pk_funcinstr_should_early_exit())
		return;
	pk_funcinstr_thrd_bkt->guard_enter++;
	pk_funcinstr_thrd_instr = pk_funcinstr_create_funcinstr(this_fn);
	pk_funcinstr_thrd_bkt->guard_enter = 0;
}
__attribute__((no_instrument_function)) void
__cyg_profile_func_exit(void* this_fn, void* call_site)
{
	(void)call_site;
	if (pk_funcinstr_detect_not_initialized())
		return;
	pk_funcinstr_detect_and_handle_reset();
	if (pk_funcinstr_should_early_exit())
		return;
	if (pk_funcinstr_thrd_instr == NULL)
		return; // exit called before enter?
	pk_funcinstr_thrd_bkt->guard_exit++;
#ifdef PK_FUNCINSTR_PRINT
	Dl_info info;
#endif /* PK_FUNCINSTR_PRINT */
	if (this_fn != pk_funcinstr_thrd_instr->fn)
	{
		int64_t i = (int64_t)pk_funcinstr_thrd_bkt->used_count - 1;
		for (; i > -1; --i)
		{
			if (pk_funcinstr_thrd_bkt->data[i].fn == this_fn)
			{
				if (pk_funcinstr_thrd_bkt->data[i].tmr.e.tv_sec == 0)
				{
					pk_funcinstr_thrd_instr = &pk_funcinstr_thrd_bkt->data[i];
					break;
				}
			}
		}
	}
	if (this_fn != pk_funcinstr_thrd_instr->fn)
	{
		if (pk_funcinstr_thrd_instr->parent == NULL)
		{
			struct pk_tmr tmr = pk_funcinstr_thrd_instr->tmr;
			pk_funcinstr_thrd_instr = pk_funcinstr_create_funcinstr(this_fn);
			pk_funcinstr_thrd_instr->tmr = tmr;
#ifdef PK_FUNCINSTR_PRINT
			fprintf(thrd_mstr.out, "[pkfuncinstr] func mismatch; Parent func? Duration not accurate.");
#endif /* PK_FUNCINSTR_PRINT */
		}
		else
		{
#ifdef PK_FUNCINSTR_PRINT
			fprintf(thrd_mstr.err, "[pkfuncinstr] func mismatch. Last: '");
			if (dladdr(pk_funcinstr_thrd_instr->fn, &info) != 0)
			{
				fprintf(thrd_mstr.err, "%s", info.dli_sname);
			}
			else
			{
				fprintf(thrd_mstr.err, "(unknown)");
			}
			fprintf(thrd_mstr.err, "'. Current: '");
			if (dladdr(this_fn, &info) != 0)
			{
				fprintf(thrd_mstr.err, "%s'.\n", info.dli_sname);
			}
			else
			{
				fprintf(thrd_mstr.err, "(unknown)'.\n");
			}
#endif /* PK_FUNCINSTR_PRINT */
			pk_funcinstr_thrd_bkt->guard_exit = 0;
			return;
		}
	}
	pk_tmr_stop(pk_funcinstr_thrd_instr->tmr);
#ifdef PK_FUNCINSTR_PRINT
	if (dladdr(this_fn, &info) != 0)
	{
		int depth = 0; // TODO track depth in a better way
		struct pk_funcinstr *p = pk_funcinstr_thrd_instr->parent;
		while (p != NULL)
		{
			depth += 1;
			p = p->parent;
		}
		char *demangled = NULL;
		if (info.dli_sname != NULL)
		{
#if defined(__cplusplus)
			demangled = abi::__cxa_demangle(info.dli_sname, NULL, NULL, NULL);
#endif
		}
		fprintf(thrd_mstr.out, "[pkfuncinstr] %p %*s %s took %.6f ms\n"
			, this_fn
			, depth, ""
			, demangled != NULL ? demangled : info.dli_sname != NULL ? info.dli_sname : "???"
			, pk_tmr_duration_dbl_mili(pk_funcinstr_thrd_instr->tmr)
		);
		if (demangled != NULL)
			free(demangled);
	}
#endif /* PK_FUNCINSTR_PRINT */
	pk_funcinstr_thrd_bkt->guard_exit = 0;
	pk_funcinstr_thrd_instr = pk_funcinstr_thrd_instr->parent;
}
#else
// other
#endif
#endif /* PK_IMPL_FUNCINSTR */

#ifndef PK_PKTST_H
#define PK_PKTST_H
typedef int (pk_test_func)();
struct pk_test_group;
typedef struct pk_test_group *(pk_test_group_get)();
typedef void (pk_test_group_setup)();
typedef void (pk_test_group_teardown)();
typedef void (pk_test_setup)();
typedef void (pk_test_teardown)();
struct pk_test
{
	const char *title;
	pk_test_func *func;
	int expected_result;
};
struct pk_test_group
{
	const char *title;
	pk_test_group_setup *group_setup;
	pk_test_group_teardown *group_teardown;
	pk_test_setup *test_setup;
	pk_test_teardown *test_teardown;
	struct pk_test *tests;
	unsigned char n_tests;
};
void pk_test_run_test_groups(pk_test_group_get **group_get_fns, unsigned long n_groups);
#if defined(__cplusplus)
#include <iostream>
#include <cmath>
#define PK_TEST_ASSERT_BODY(expected, value, comp) \
	std::cerr << "[pk-test] "; \
	std::cerr << "(" << __FILE__ << ":" << __LINE__ << ")"; \
	std::cerr << PK_CLR_FG_RED " Failed " PK_CLR_RESET; \
	std::cerr << #comp " , Condition: \""; \
	std::cerr << PK_CLR_FG_BRIGHT_BLUE << #value << PK_CLR_RESET; \
	std::cerr << "\", Expected: \""; \
	std::cerr << PK_CLR_FG_GREEN << (expected) << PK_CLR_RESET; \
	std::cerr << "\", Got: \""; \
	std::cerr << PK_CLR_FG_RED << (value) << PK_CLR_RESET; \
	std::cerr << "\"." << std::endl;
template <typename T>
inline bool flt_equal(T a, T b, T epsilon)
{
	return std::abs(a - b) < epsilon;
}
#else /* __cplusplus */
#include <stdio.h>
#define PK_TEST_ASSERT_BODY(expected, value, comp) \
	fprintf(stderr,"[pk-test] (%s:%i) ", __FILE__, __LINE__); \
	fprintf(stderr,"%s ", PK_CLR_FG_RED "Failed" PK_CLR_RESET); \
	fprintf(stderr,#comp " : Test condition \""); \
	fprintf(stderr,"%s\"\n",PK_CLR_FG_BRIGHT_BLUE #value PK_CLR_RESET);
#endif /* __cplusplus */
#define PK_TEST_ASSERT_EQ(expected, value) { \
	if ((value) != (expected)) { PK_TEST_ASSERT_BODY(expected, value, ==) } \
}
#define PK_TEST_ASSERT_EQ_RET(expected, value) { \
	if ((value) != (expected)) { PK_TEST_ASSERT_BODY(expected, value, ==) return -1; } \
}
#define PK_TEST_ASSERT_EQ_EXIT(expected, value) { \
	if ((value) != (expected)) { PK_TEST_ASSERT_BODY(expected, value, ==) exit(1); } \
}
#define PK_TEST_ASSERT_NEQ(expected, value) { \
	if ((value) == (expected)) { PK_TEST_ASSERT_BODY(expected, value, !=) } \
}
#define PK_TEST_ASSERT_NEQ_RET(expected, value) { \
	if ((value) == (expected)) { PK_TEST_ASSERT_BODY(expected, value, !=) return -1; } \
}
#define PK_TEST_ASSERT_NEQ_EXIT(expected, value) { \
	if ((value) == (expected)) { PK_TEST_ASSERT_BODY(expected, value, !=) exit(1); } \
}
#define PK_TEST_ASSERT_GT(expected, value) { \
	if ((value) <= (expected)) { PK_TEST_ASSERT_BODY(expected, value, >) } \
}
#define PK_TEST_ASSERT_GT_RET(expected, value) { \
	if ((value) <= (expected)) { PK_TEST_ASSERT_BODY(expected, value, >) return -1; } \
}
#define PK_TEST_ASSERT_GT_EXIT(expected, value) { \
	if ((value) <= (expected)) { PK_TEST_ASSERT_BODY(expected, value, >) exit(1); } \
}
#define PK_TEST_ASSERT_LT(expected, value) { \
	if ((value) >= (expected)) { PK_TEST_ASSERT_BODY(expected, value, <) } \
}
#define PK_TEST_ASSERT_LT_RET(expected, value) { \
	if ((value) >= (expected)) { PK_TEST_ASSERT_BODY(expected, value, <) return -1; } \
}
#define PK_TEST_ASSERT_LT_EXIT(expected, value) { \
	if ((value) >= (expected)) { PK_TEST_ASSERT_BODY(expected, value, <) exit(1); } \
}
#define PK_TEST_ASSERT_GTE(expected, value) { \
	if ((value) < (expected)) { PK_TEST_ASSERT_BODY(expected, value, >=) } \
}
#define PK_TEST_ASSERT_GTE_RET(expected, value) { \
	if ((value) < (expected)) { PK_TEST_ASSERT_BODY(expected, value, >=) return -1; } \
}
#define PK_TEST_ASSERT_GTE_EXIT(expected, value) { \
	if ((value) < (expected)) { PK_TEST_ASSERT_BODY(expected, value, >=) exit(1); } \
}
#define PK_TEST_ASSERT_LTE(expected, value) { \
	if ((value) > (expected)) { PK_TEST_ASSERT_BODY(expected, value, <=) } \
}
#define PK_TEST_ASSERT_LTE_RET(expected, value) { \
	if ((value) > (expected)) { PK_TEST_ASSERT_BODY(expected, value, <=) return -1; } \
}
#define PK_TEST_ASSERT_LTE_EXIT(expected, value) { \
	if ((value) > (expected)) { PK_TEST_ASSERT_BODY(expected, value, <=) exit(1); } \
}
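/*******************************************************************************
 * pktst usage sketch: a group exposes its tests through a pk_test_group_get
 * function, and each test's return value is compared against expected_result.
 *
 * pk.h.tst.example.c
 * ``` c
 * static int
 * test_addition()
 * {
 *     PK_TEST_ASSERT_EQ_RET(4, 2 + 2);
 *     return 0;
 * }
 *
 * static struct pk_test tests[] = { { "addition", &test_addition, 0 } };
 * static struct pk_test_group group = { "example", NULL, NULL, NULL, NULL, tests, 1 };
 * static struct pk_test_group *get_group() { return &group; }
 *
 * // pk_test_group_get *groups[] = { &get_group };
 * // pk_test_run_test_groups(groups, 1);
 * ```
 ******************************************************************************/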
#ifdef PK_IMPL_TST
void
pk_test_run_test_groups(pk_test_group_get **group_get_fns, unsigned long n_groups)
{
	int result;
	unsigned long i;
	unsigned int k, pass_count, total_test_count, total_test_pass_count, test_group_count, test_group_pass_count;
	double elapsed_ms, group_ms, total_ms;
	struct pk_tmr func_tmr, total_tmr;
	struct pk_test_group *group;
	fprintf(stdout, "\r\n");
	fprintf(stdout, "[pk-test] Begin..\n");
	fprintf(stdout, "[pk-test] Running %04lu test groups..\n", n_groups);
	total_ms = 0;
	total_test_count = 0;
	total_test_pass_count = 0;
	test_group_count = 0;
	test_group_pass_count = 0;
	pk_tmr_start(total_tmr);
	fprintf(stdout, "\r\n");
	for (i = 0; i < n_groups; ++i)
	{
		test_group_count += 1;
		pass_count = 0;
		group_ms = 0;
		group = group_get_fns[i]();
		fprintf(stdout, "[pk-test][%s] Begin..\n", group->title);
		if (group->group_setup != NULL)
			(group->group_setup)();
		for (k = 0; k < group->n_tests; ++k)
		{
			total_test_count += 1;
			fprintf(stdout, "[pk-test][%s][%s] Begin..\n", group->title, group->tests[k].title);
			if (group->test_setup != NULL)
				(group->test_setup)();
			pk_tmr_start(func_tmr);
			result = (group->tests[k].func)();
			pk_tmr_stop(func_tmr);
			elapsed_ms = pk_tmr_duration_dbl_mili(func_tmr);
			fprintf(stdout, "[pk-test][%s][%s] End.\n", group->title, group->tests[k].title);
			group_ms += elapsed_ms;
			total_ms += elapsed_ms;
			fprintf(stdout, "[pk-test][%s][%s] Elapsed ms: '%f'.\n", group->title, group->tests[k].title, elapsed_ms);
			if (result == group->tests[k].expected_result)
			{
				total_test_pass_count += 1;
				pass_count += 1;
				fprintf(stdout, "[pk-test][%s][%s] %sPassed.%s\n", group->title, group->tests[k].title, PK_CLR_FG_GREEN, PK_CLR_RESET);
			}
			else
			{
				fprintf(stdout, "[pk-test][%s][%s] %sFailed.%s\n", group->title, group->tests[k].title, PK_CLR_FG_RED, PK_CLR_RESET);
				fprintf(stdout, "[pk-test][%s][%s] Expected: '" PK_CLR_FG_GREEN "%i" PK_CLR_RESET "', Got: '" PK_CLR_FG_RED "%i" PK_CLR_RESET "'.\n", group->title, group->tests[k].title, group->tests[k].expected_result, result);
			}
			if (group->test_teardown != NULL)
				(group->test_teardown)();
		}
		if (group->group_teardown != NULL)
			(group->group_teardown)();
		fprintf(stdout, "[pk-test][%s] End.\n", group->title);
		fprintf(stdout, "[pk-test][%s] Tests completed: ( %s%04d%s / %04d ).\n", group->title, pass_count == group->n_tests ? PK_CLR_FG_GREEN : PK_CLR_FG_RED, pass_count, PK_CLR_RESET, group->n_tests);
		fprintf(stdout, "[pk-test][%s] Elapsed ms: '%f'.\n\n", group->title, group_ms);
		if (pass_count == group->n_tests)
		{
			test_group_pass_count += 1;
		}
	}
	pk_tmr_stop(total_tmr);
	fprintf(stdout, "[pk-test] End.\n");
	fprintf(stdout, "[pk-test] Tests completed: ( %s%04d%s / %04d ).\n", total_test_count == total_test_pass_count ? PK_CLR_FG_GREEN : PK_CLR_FG_RED, total_test_pass_count, PK_CLR_RESET, total_test_count);
	fprintf(stdout, "[pk-test] Test groups completed: ( %s%04d%s / %04d ).\n", test_group_count == test_group_pass_count ? PK_CLR_FG_GREEN : PK_CLR_FG_RED, test_group_pass_count, PK_CLR_RESET, test_group_count);
	fprintf(stdout, "[pk-test] Elapsed ms: '%f' (test fn sum).\n", total_ms);
	fprintf(stdout, "[pk-test] Elapsed ms: '%f' (actual).\n\n", pk_tmr_duration_dbl_mili(total_tmr));
}
#endif /* PK_IMPL_TST */
#endif /* PK_PKTST_H */
#endif /* PK_SINGLE_HEADER_FILE_H */