author    Jonathan Bradley <jcb@pikum.xyz>  2025-01-09 14:44:31 -0500
committer Jonathan Bradley <jcb@pikum.xyz>  2025-01-09 14:44:31 -0500
commit    b76e309166f714b0a66fb4802f02e92a82d09082 (patch)
tree      44244fc0e6f873e1ccf6e1e95e2fec62fcec394a /src/pk.h
parent    b04fefe8ee0086bc1404c06b8351ecb4e942f151 (diff)
flatten file structure + rename
Diffstat (limited to 'src/pk.h')
-rw-r--r--  src/pk.h  1916
1 file changed, 1916 insertions, 0 deletions
diff --git a/src/pk.h b/src/pk.h
new file mode 100644
index 0000000..49b735f
--- /dev/null
+++ b/src/pk.h
@@ -0,0 +1,1916 @@
+#ifndef PK_SINGLE_HEADER_FILE_H
+#define PK_SINGLE_HEADER_FILE_H
+/*******************************************************************************
+* PK Single-Header-Library V0.1.1
+*
+* Author: Jonathan Bradley
+* Copyright: © 2024-2025 Jonathan Bradley
+* Description:
+*
+* A collection of useful programming tools, available for C and C++ as a
+* single-header file. To enable, define PK_IMPL_ALL in exactly ONE C or C++
+* file before including pk.h.
+*
+* Example:
+*
+* pk.h.include.c
+* ``` c
+* #define PK_IMPL_ALL
+* #include "pk.h"
+* ```
+*
+* It is also possible to enable modules ad-hoc by defining each IMPL
+* individually:
+*
+* pk.h.include.c
+* ``` c
+* # define PK_IMPL_MEM_TYPES
+* # define PK_IMPL_MEM
+* # define PK_IMPL_STR
+* # define PK_IMPL_EV
+* # define PK_IMPL_ARR
+* # define PK_IMPL_STN
+* #include "pk.h"
+* ```
+*
+********************************************************************************
+* pkmacros.h:
+*
+* Provides a set of useful macros for a variety of uses.
+*
+* The macros PK_LOG* provide simple logging utilities. These can be overridden
+* by providing your own implementation of each and defining PK_LOG_OVERRIDE
+* before including pk.h. Note that each of these is a no-op if NDEBUG is
+* defined.
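+*
+* For example (a sketch; the replacement bodies shown are illustrative):
+* ``` c
+* #define PK_LOG_OVERRIDE
+* #define PK_LOG_ERR(str) fprintf(stderr, "[err] " str)
+* #define PK_LOG_INF(str) fprintf(stdout, "[inf] " str)
+* #define PK_LOGV_ERR(str, ...) fprintf(stderr, "[err] " str, __VA_ARGS__)
+* #define PK_LOGV_INF(str, ...) fprintf(stdout, "[inf] " str, __VA_ARGS__)
+* #include "pk.h"
+* ```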
+*
+* The TypeSafeInt_H and TypeSafeInt_B macros (C++ only) provide a way to define
+* distinct, type-safe integer types, implemented via enum classes.
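+*
+* For example (a sketch; EntityId is an illustrative name):
+* ``` c
+* TypeSafeInt_H(EntityId, uint32_t, 0xFFFFFFFF) // header: type + operator decls
+* TypeSafeInt_B(EntityId) // ONE translation unit: operator bodies
+*
+* EntityId id = EntityId{0};
+* ++id; // id == EntityId{1}
+* ```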
+*
+********************************************************************************
+* pkmem-types.h: def PK_IMPL_MEM_TYPES before including pk.h to enable ad-hoc.
+*
+* Provides the types needed by pkmem, as well as a generic pk_handle featuring a
+* bucket+item indexing system.
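+*
+* Example (values are illustrative):
+* ``` c
+* struct pk_handle h = { .bucketIndex = 0, .itemIndex = 42 };
+* struct pk_handle last = { .bucketIndex = 3, .itemIndex = 7 };
+* if (pk_handle_validate(h, last, 1024) == PK_HANDLE_VALIDATION_VALID) {
+* // h refers to a live bucket+item
+* }
+* ```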
+*
+********************************************************************************
+* pkmem.h: def PK_IMPL_MEM before including pk.h to enable ad-hoc.
+*
+* A bucketed memory manager. Allows for the creation and management of up to
+* PK_MAX_BUCKET_COUNT buckets.
+*
+* Thread safety: Bucket creation and destruction is *not* thread-safe. On the
+* other hand, the "pk_new" and "pk_delete" methods *are* thread-safe, but
+* thread-safety is implemented per-bucket via a single mutex with long-running
+* lock times. PRs for a more performant thread-safe strategy are welcome,
+* depending on complexity and benchmarks.
+*
+* The following definitions (shown with defaults) can be overridden:
+* PK_DEFAULT_BUCKET_SIZE 256MB (used when bkt is NULL on first call)
+* PK_MINIMUM_ALIGNMENT 1
+* PK_MAXIMUM_ALIGNMENT 64
+* PK_MAX_BUCKET_COUNT 8
+*
+* For debugging purposes, define the following:
+* PK_MEMORY_DEBUGGER : enables a tracking system for all allocs and frees to
+* ensure bucket validity and consistency.
+* PK_MEMORY_FORCE_MALLOC : completely disables pkmem and its debugging features
+* in favor of directly using malloc and free. Useful for out-of-bounds
+* checking.
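+*
+* Example (a sketch; the size and description are illustrative):
+* ``` c
+* struct pk_membucket* bkt = pk_bucket_create("scratch", 1024 * 1024, false);
+* int* xs = (int*)pk_new(sizeof(int) * 64, alignof(int), bkt);
+* // ... use xs ...
+* pk_delete(xs, sizeof(int) * 64, bkt);
+* pk_bucket_destroy(bkt);
+* ```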
+*
+********************************************************************************
+* pkstr.h: def PK_IMPL_STR before including pk.h to enable ad-hoc.
+*
+* Provides a simple string structure, allowing the user to track the string
+* length and reserved buffer length. Limits max string length to uint32_t max
+* size, which is roughly 4GB.
+*
+* Tip: set reserved to 0 for compile-time strings as well as for strings alloc'd
+* in a larger buffer (such as bulk-loaded data).
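+*
+* Example:
+* ``` c
+* struct pk_cstr s = cstring_to_pk_cstr("hello");
+* // s.length == 5; s.reserved == 0 (buffer not owned by pkstr)
+* ```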
+*
+********************************************************************************
+* pkev.h: def PK_IMPL_EV before including pk.h to enable ad-hoc.
+*
+* Provides a simple event callback system. While the _init and _teardown
+* functions are NOT thread-safe, the _register and _emit functions are.
+* Note: uses malloc.
+*
+* Each mgr is stored contiguously with its data. Consider the following layout:
+* [[mgr][ev 0][ev 1][..][ev N][ev 0 cb array][ev 1 cb array][..][ev N cb array]]
+*
+* The following definitions (shown with defaults) can be overridden:
+* PK_EV_INIT_MGR_COUNT 1
+* PK_EV_INIT_EV_COUNT 16
+* PK_EV_INIT_CB_COUNT 8
+* PK_EV_GROW_RATIO 1.5
+*
+* The number of evs and cbs (per ev) is stored as a uint8_t, so a hard-limit of
+* 255 is to be observed for each. The number of mgrs is stored as a uint64_t.
+*
+* Note that PK_EV_GROW_RATIO is used in two scenarios:
+* 1. When registering an ev on a full mgr.
+* 2. When registering a cb on a full ev.
+* The grow ratio is applied to the ev count and cb count in their respective
+* scenarios. This causes a new allocation for the entire mgr. The existing
+* mgr and its evs and cbs are copied to the new larger buffer space.
+* Explicitly, the number of mgrs does not grow dynamically. Use
+* PK_EV_INIT_MGR_COUNT to control the number of mgrs.
+*
+* Note that increasing PK_EV_INIT_MGR_COUNT isn't recommended, but you may
+* consider doing so if you have specific size or contiguity requirements. For
+* example, you could -DPK_EV_INIT_EV_COUNT=1 to reduce the memory footprint of
+* each event/mgr, and simply create a new mgr for each needed event. Be aware
+* that in this provided scenario a given mgr will still grow if a second EV is
+* registered.
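+*
+* Example (a sketch; on_ping is an illustrative callback):
+* ``` c
+* void on_ping(void *ev_data, void *cb_data, void *emit_data) { }
+*
+* pk_ev_init();
+* pk_ev_mgr_id_T mgr = pk_ev_create_mgr();
+* pk_ev_id_T ping = pk_ev_register_ev(mgr, NULL);
+* pk_ev_register_cb(mgr, ping, &on_ping, NULL);
+* pk_ev_emit(mgr, ping, NULL);
+* pk_ev_teardown();
+* ```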
+*
+********************************************************************************
+* pkarr.h: def PK_IMPL_ARR before including pk.h to enable ad-hoc.
+*
+* Provides a structure for managing contiguous lists.
+*
+* The following definitions (shown with defaults) can be overridden:
+* PK_ARR_INITIAL_COUNT 16
+* PK_ARR_GROW_RATIO 1.5
+* PK_ARR_MOVE_IN_PLACE (not defined)
+*
+* The macro `PK_ARR_MOVE_IN_PLACE` ensures that, when possible, the pointer
+* value of `arr->data` is preserved.
+* It is used in the following methods:
+* `pk_arr_move_to_back`
+* `pk_arr_remove_at`
+* This has two additional benefits:
+* 1. Minimizing the number and `sz` of calls to `pk_new`
+* 2. Ensuring `data[0]` to `data[(N - 1) * stride]` is not copied extraneously
+* to a new buffer.
+* The speed of this will vary depending on usage, platform, and compiler.
+*
+* Initialize `stride`, `alignment`, and `bkt` (optional) members
+* *before* calling any `pk_arr_*` methods.
+*
+* Examples:
+* ``` c
+* struct pk_arr arr = {0};
+* arr.stride = sizeof(obj); // required
+* arr.alignment = alignof(obj); // required
+* arr.bkt = bkt; // optional
+* pk_arr_reserve(&arr, 10); // optional
+* pk_arr_append(&arr, &obj);
+* ```
+* ``` c
+* struct pk_arr arr = {0};
+* arr.stride = sizeof(obj); // required
+* arr.alignment = alignof(obj); // required
+* arr.bkt = bkt; // optional
+* pk_arr_resize(&arr, 10);
+* obj* d = (obj*)arr.data;
+* d[0] = ...;
+* ```
+*
+********************************************************************************
+* pkstn.h: def PK_IMPL_STN before including pk.h to enable ad-hoc.
+*
+* Provides a thorough interface for interacting with the `strto*` family of
+* procedures (strtol, strtoull, strtof, strtod, ...).
+*
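+* Example:
+* ``` c
+* int32_t n;
+* if (pk_stn_int32_t(&n, "1234", 10) == PK_STN_RES_SUCCESS) {
+* // n == 1234
+* }
+* ```
+*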
+********************************************************************************
+* pktmr.h: No IMPL define, all methods are macros.
+*
+* Offers a set of `pk_tmr*` macros for elapsed time checking.
+*
+*******************************************************************************/
+
+#define PK_VERSION "0.1.1"
+
+#ifdef PK_IMPL_ALL
+# ifndef PK_IMPL_MEM_TYPES
+# define PK_IMPL_MEM_TYPES
+# endif
+# ifndef PK_IMPL_MEM
+# define PK_IMPL_MEM
+# endif
+# ifndef PK_IMPL_STR
+# define PK_IMPL_STR
+# endif
+# ifndef PK_IMPL_EV
+# define PK_IMPL_EV
+# endif
+# ifndef PK_IMPL_ARR
+# define PK_IMPL_ARR
+# endif
+# ifndef PK_IMPL_STN
+# define PK_IMPL_STN
+# endif
+#endif
+#ifndef PK_MACROS_H
+#define PK_MACROS_H
+
+#ifndef PK_LOG_OVERRIDE
+# ifdef NDEBUG
+# define PK_LOG_ERR(str) (void)str
+# define PK_LOG_INF(str) (void)str
+# define PK_LOGV_ERR(str, ...) (void)str
+# define PK_LOGV_INF(str, ...) (void)str
+# else
+# define PK_LOG_ERR(str) fprintf(stderr, str)
+# define PK_LOG_INF(str) fprintf(stdout, str)
+# define PK_LOGV_ERR(str, ...) fprintf(stderr, str, __VA_ARGS__)
+# define PK_LOGV_INF(str, ...) fprintf(stdout, str, __VA_ARGS__)
+# endif
+#endif
+
+#define PK_Q(x) #x
+#define PK_QUOTE(x) PK_Q(x)
+#define PK_CONCAT2(x, y) x##y
+#define PK_CONCAT(x, y) PK_CONCAT2(x, y)
+
+#define PK_HAS_FLAG(val, flag) (((val) & (flag)) == (flag))
+#define PK_CLAMP(val, min, max) ((val) < (min) ? (min) : (val) > (max) ? (max) : (val))
+#define PK_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define PK_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define PK_TO_BIN_PAT PK_Q(%c%c%c%c%c%c%c%c)
+#define PK_TO_BIN_PAT_8 PK_TO_BIN_PAT
+#define PK_TO_BIN_PAT_16 PK_TO_BIN_PAT PK_TO_BIN_PAT
+#define PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_16 PK_TO_BIN_PAT_16
+#define PK_TO_BIN_PAT_64 PK_TO_BIN_PAT_32 PK_TO_BIN_PAT_32
+#define PK_TO_BIN(byte) \
+ ((byte) & 0x80 ? '1' : '0'), \
+ ((byte) & 0x40 ? '1' : '0'), \
+ ((byte) & 0x20 ? '1' : '0'), \
+ ((byte) & 0x10 ? '1' : '0'), \
+ ((byte) & 0x08 ? '1' : '0'), \
+ ((byte) & 0x04 ? '1' : '0'), \
+ ((byte) & 0x02 ? '1' : '0'), \
+ ((byte) & 0x01 ? '1' : '0')
+#define PK_TO_BIN_8(u8) PK_TO_BIN(u8)
+#define PK_TO_BIN_16(u16) PK_TO_BIN((u16 >> 8)), PK_TO_BIN(u16 & 0x00FF)
+#define PK_TO_BIN_32(u32) PK_TO_BIN_16((u32 >> 16)), PK_TO_BIN_16(u32 & 0x0000FFFF)
+#define PK_TO_BIN_64(u64) PK_TO_BIN_32((u64 >> 32)), PK_TO_BIN_32(u64 & 0x00000000FFFFFFFF)
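+/* Example: print a byte in binary:
+ * printf(PK_TO_BIN_PAT_8 "\n", PK_TO_BIN_8(0xA5)); // prints 10100101
+ */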
+
+#if defined(__cplusplus)
+# define CAFE_BABE(T) reinterpret_cast<T *>(0xCAFEBABE)
+#else
+# define CAFE_BABE(T) (T *)(0xCAFEBABE)
+#endif
+
+#define NULL_CHAR_ARR(v, len) char v[len]; v[0] = '\0'; v[len-1] = '\0';
+
+#define IS_CONSTRUCTIBLE(T) constexpr(std::is_default_constructible<T>::value && !std::is_integral<T>::value && !std::is_floating_point<T>::value)
+#define IS_DESTRUCTIBLE(T) constexpr(std::is_destructible<T>::value && !std::is_integral<T>::value && !std::is_floating_point<T>::value && !std::is_array<T>::value)
+
+#define TypeSafeInt2_H(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \
+ using TypeName_T = Type; \
+ enum class TypeName : TypeName_T; \
+ constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max}; \
+ constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX}; \
+ TypeName operator+(const TypeName& a, const TypeName& b); \
+ TypeName operator-(const TypeName& a, const TypeName& b); \
+ TypeName operator&(const TypeName& a, const TypeName& b); \
+ TypeName operator|(const TypeName& a, const TypeName& b); \
+ TypeName operator^(const TypeName& a, const TypeName& b); \
+ TypeName& operator++(TypeName& a); \
+ TypeName& operator--(TypeName& a); \
+ TypeName operator++(TypeName& a, int); \
+ TypeName operator--(TypeName& a, int); \
+ TypeName operator<<(const TypeName& a, const TypeName& b); \
+ TypeName operator>>(const TypeName& a, const TypeName& b); \
+ TypeName operator+=(TypeName& a, const TypeName& b); \
+ TypeName operator-=(TypeName& a, const TypeName& b); \
+ TypeName operator&=(TypeName& a, const TypeName& b); \
+ TypeName operator|=(TypeName& a, const TypeName& b); \
+ TypeName operator^=(TypeName& a, const TypeName& b); \
+ TypeName operator~(TypeName& a);
+#define TypeSafeInt2_B(TypeName, TypeName_T) \
+ inline TypeName operator+(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) + static_cast<TypeName_T>(b)); \
+ } \
+ inline TypeName operator-(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) - static_cast<TypeName_T>(b)); \
+ } \
+ inline TypeName operator&(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) & static_cast<TypeName_T>(b)); \
+ } \
+ inline TypeName operator|(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) | static_cast<TypeName_T>(b)); \
+ } \
+ inline TypeName operator^(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) ^ static_cast<TypeName_T>(b)); \
+ } \
+ inline TypeName& operator++(TypeName& a) { \
+ a = a + TypeName{1}; \
+ return a; \
+ } \
+ inline TypeName& operator--(TypeName& a) { \
+ a = a - TypeName{1}; \
+ return a; \
+ }; \
+ inline TypeName operator++(TypeName& a, int) { \
+ a = a + TypeName{1}; \
+ return a; \
+ } \
+ inline TypeName operator--(TypeName& a, int) { \
+ a = a - TypeName{1}; \
+ return a; \
+ }; \
+ inline TypeName operator<<(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) << static_cast<TypeName_T>(b)); \
+ }; \
+ inline TypeName operator>>(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) >> static_cast<TypeName_T>(b)); \
+ }; \
+ inline TypeName operator+=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a + b}; \
+ return a; \
+ }; \
+ inline TypeName operator-=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a - b}; \
+ return a; \
+ }; \
+ inline TypeName operator&=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a & b}; \
+ return a; \
+ }; \
+ inline TypeName operator|=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a | b}; \
+ return a; \
+ }; \
+ inline TypeName operator^=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a ^ b}; \
+ return a; \
+ }; \
+ inline TypeName operator~(TypeName& a) { \
+ a = static_cast<TypeName>(~static_cast<TypeName_T>(a)); \
+ return a; \
+ };
+#define TypeSafeInt_H(TypeName, Type, Max) \
+ TypeSafeInt2_H(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX))
+#define TypeSafeInt_B(TypeName) \
+ TypeSafeInt2_B(TypeName, PK_CONCAT(TypeName, _T))
+
+#define TypeSafeInt2_H_constexpr(TypeName, Type, Max, TypeName_T, TypeName_MAX, TypeName_T_MAX) \
+ using TypeName_T = Type; \
+ enum class TypeName : TypeName_T; \
+ constexpr TypeName_T TypeName_T_MAX = TypeName_T{Max}; \
+ constexpr TypeName TypeName_MAX = TypeName{TypeName_T_MAX}; \
+ constexpr TypeName operator+(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) + static_cast<TypeName_T>(b)); \
+ } \
+ constexpr TypeName operator-(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) - static_cast<TypeName_T>(b)); \
+ } \
+ constexpr TypeName operator&(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) & static_cast<TypeName_T>(b)); \
+ } \
+ constexpr TypeName operator|(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) | static_cast<TypeName_T>(b)); \
+ } \
+ constexpr TypeName operator^(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) ^ static_cast<TypeName_T>(b)); \
+ } \
+ constexpr TypeName& operator++(TypeName& a) { \
+ a = a + TypeName{1}; \
+ return a; \
+ } \
+ constexpr TypeName& operator--(TypeName& a) { \
+ a = a - TypeName{1}; \
+ return a; \
+ }; \
+ constexpr TypeName operator++(TypeName& a, int) { \
+ a = a + TypeName{1}; \
+ return a; \
+ } \
+ constexpr TypeName operator--(TypeName& a, int) { \
+ a = a - TypeName{1}; \
+ return a; \
+ }; \
+ constexpr TypeName operator<<(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) << static_cast<TypeName_T>(b)); \
+ }; \
+ constexpr TypeName operator>>(const TypeName& a, const TypeName& b) { \
+ return TypeName(static_cast<TypeName_T>(a) >> static_cast<TypeName_T>(b)); \
+ }; \
+ constexpr TypeName operator+=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a + b}; \
+ return a; \
+ }; \
+ constexpr TypeName operator-=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a - b}; \
+ return a; \
+ }; \
+ constexpr TypeName operator&=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a & b}; \
+ return a; \
+ }; \
+ constexpr TypeName operator|=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a | b}; \
+ return a; \
+ }; \
+ constexpr TypeName operator^=(TypeName& a, const TypeName& b) { \
+ a = TypeName{a ^ b}; \
+ return a; \
+ }; \
+ constexpr TypeName operator~(const TypeName& a) { \
+ return static_cast<TypeName>(~static_cast<TypeName_T>(a)); \
+ };
+#define TypeSafeInt_constexpr(TypeName, Type, Max) \
+ TypeSafeInt2_H_constexpr(TypeName, Type, Max, PK_CONCAT(TypeName, _T), PK_CONCAT(TypeName, _MAX), PK_CONCAT(TypeName, _T_MAX))
+
+#endif /* PK_MACROS_H */
+#ifndef PK_MEM_TYPES_H
+#define PK_MEM_TYPES_H
+
+#include <stdint.h>
+
+typedef uint32_t pk_handle_bucket_index_T;
+typedef uint32_t pk_handle_item_index_T;
+
+enum PK_HANDLE_VALIDATION : uint8_t {
+ PK_HANDLE_VALIDATION_VALID = 0,
+ PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH = 1,
+ PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH = 2,
+ PK_HANDLE_VALIDATION_VALUE_MAX = 3,
+};
+
+struct pk_handle {
+ pk_handle_bucket_index_T bucketIndex;
+ pk_handle_item_index_T itemIndex;
+};
+
+#define PK_HANDLE_MAX ((struct pk_handle){ .bucketIndex = 0xFFFFFFFF, .itemIndex = 0xFFFFFFFF })
+
+enum PK_HANDLE_VALIDATION pk_handle_validate(const struct pk_handle handle, const struct pk_handle bucketHandle, const uint64_t maxItems);
+
+#if defined(__cplusplus)
+
+constexpr struct pk_handle pk_handle_MAX_constexpr = (struct pk_handle){ .bucketIndex = 0xFFFFFFFF, .itemIndex = 0xFFFFFFFF };
+
+inline constexpr bool
+operator==(const pk_handle& lhs, const pk_handle& rhs)
+{
+ return lhs.bucketIndex == rhs.bucketIndex && lhs.itemIndex == rhs.itemIndex;
+}
+
+template<const pk_handle handle, const pk_handle bucketHandle, const uint64_t maxItems>
+inline constexpr enum PK_HANDLE_VALIDATION
+pk_handle_validate_constexpr()
+{
+ if constexpr (handle == pk_handle_MAX_constexpr)
+ return PK_HANDLE_VALIDATION_VALUE_MAX;
+ if constexpr (handle.bucketIndex > bucketHandle.bucketIndex)
+ return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH;
+ if constexpr (handle.itemIndex > maxItems)
+ return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
+ if constexpr (handle.bucketIndex == bucketHandle.bucketIndex && handle.itemIndex > bucketHandle.itemIndex)
+ return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
+ return PK_HANDLE_VALIDATION_VALID;
+}
+#endif /* __cplusplus */
+
+struct pk_membucket;
+
+#endif /* PK_MEM_TYPES_H */
+
+#ifdef PK_IMPL_MEM_TYPES
+
+enum PK_HANDLE_VALIDATION
+pk_handle_validate(const struct pk_handle handle, const struct pk_handle bucketHandle, const uint64_t maxItems)
+{
+ if (handle.bucketIndex == PK_HANDLE_MAX.bucketIndex && handle.itemIndex == PK_HANDLE_MAX.itemIndex)
+ return PK_HANDLE_VALIDATION_VALUE_MAX;
+ if (handle.bucketIndex > bucketHandle.bucketIndex)
+ return PK_HANDLE_VALIDATION_BUCKET_INDEX_TOO_HIGH;
+ if (handle.itemIndex > maxItems)
+ return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
+ if (handle.bucketIndex == bucketHandle.bucketIndex && handle.itemIndex > bucketHandle.itemIndex)
+ return PK_HANDLE_VALIDATION_ITEM_INDEX_TOO_HIGH;
+ return PK_HANDLE_VALIDATION_VALID;
+}
+
+#endif /* PK_IMPL_MEM_TYPES */
+#ifndef PK_MEM_H
+#define PK_MEM_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifndef PK_DEFAULT_BUCKET_SIZE
+# define PK_DEFAULT_BUCKET_SIZE (1ULL * 1024ULL * 1024ULL * 256ULL)
+#endif
+#ifndef PK_MINIMUM_ALIGNMENT
+# define PK_MINIMUM_ALIGNMENT 1
+#endif
+#ifndef PK_MAXIMUM_ALIGNMENT
+# define PK_MAXIMUM_ALIGNMENT 64
+#endif
+
+struct pk_membucket* pk_bucket_create(const char* description, int64_t sz, bool transient);
+void pk_bucket_destroy(struct pk_membucket* bkt);
+void pk_bucket_reset(struct pk_membucket* bkt);
+
+void pk_memory_debug_print();
+void pk_memory_flush();
+void pk_memory_teardown_all();
+bool pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt);
+
+void* pk_new_base(size_t sz, size_t alignment);
+void* pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt);
+void* pk_new(size_t sz, size_t alignment, struct pk_membucket* bkt);
+void pk_delete_base(const void* ptr, size_t sz);
+void pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt);
+void pk_delete(const void* ptr, size_t sz, struct pk_membucket* bkt);
+
+#if defined(__cplusplus)
+
+#include <type_traits>
+
+static inline void stupid_header_warnings_cpp() { (void)std::is_const<void>::value; }
+
+template <typename T>
+inline T*
+pk_new(pk_membucket* bucket = nullptr)
+{
+ void* ptr = nullptr;
+ if (bucket) {
+ ptr = pk_new_bkt(sizeof(T), alignof(T), bucket);
+ } else {
+ ptr = pk_new_base(sizeof(T), alignof(T));
+ }
+ if IS_CONSTRUCTIBLE(T) {
+ return new (ptr) T{};
+ }
+ return reinterpret_cast<T*>(ptr);
+}
+
+template <typename T>
+inline T*
+pk_new(long count, pk_membucket* bucket = nullptr)
+{
+ char* ptr = nullptr;
+ if (bucket) {
+ ptr = static_cast<char*>(pk_new_bkt(sizeof(T) * count, alignof(T), bucket));
+ } else {
+ ptr = static_cast<char*>(pk_new_base(sizeof(T) * count, alignof(T)));
+ }
+ if IS_CONSTRUCTIBLE(T) {
+ for (long i = 0; i < count; ++i) {
+ new (ptr + (i * sizeof(T))) T{};
+ }
+ }
+ return reinterpret_cast<T*>(ptr);
+}
+
+template <typename T>
+inline void
+pk_delete(const T* ptr, pk_membucket* bucket = nullptr)
+{
+ if IS_DESTRUCTIBLE(T) {
+ reinterpret_cast<const T*>(ptr)->~T();
+ }
+ if (bucket) {
+ return pk_delete_bkt(static_cast<const void*>(ptr), sizeof(T), bucket);
+ } else {
+ return pk_delete_base(static_cast<const void*>(ptr), sizeof(T));
+ }
+}
+
+template <typename T>
+inline void
+pk_delete(const T* ptr, long count, pk_membucket* bucket = nullptr)
+{
+ if IS_DESTRUCTIBLE(T) {
+ for (long i = 0; i < count; ++i) {
+ reinterpret_cast<const T*>(reinterpret_cast<const char*>(ptr) + (i * sizeof(T)))->~T();
+ }
+ }
+ if (bucket) {
+ return pk_delete_bkt(static_cast<const void*>(ptr), sizeof(T) * count, bucket);
+ } else {
+ return pk_delete_base(static_cast<const void*>(ptr), sizeof(T) * count);
+ }
+}
+
+#endif /* __cplusplus */
+
+#endif /* PK_MEM_H */
+
+#ifdef PK_IMPL_MEM
+
+#include <string.h>
+#include <stdio.h>
+#include <threads.h>
+#include <assert.h>
+
+static inline void pkmem_stupid_header_warnings() { (void)stdout; }
+
+#if defined(PK_MEMORY_DEBUGGER)
+/*
+ * Note that certain aspects of this expect that you only have one non-transient bucket.
+ * If you need to track multiple non-transient buckets, these sections will need a refactor.
+ */
+#endif
+
+#ifndef PK_MAX_BUCKET_COUNT
+# define PK_MAX_BUCKET_COUNT 8
+#endif
+
+struct pk_memblock {
+ char* data;
+ size_t size;
+};
+
+struct pk_membucket {
+ // the total size of the bucket, `blocks+ptr`
+ int64_t size;
+ // the current head of the bucket: byte offset from `ptr`.
+ // All currently alloc'd data is before this offset
+ int64_t head;
+ // amount of lost bytes in this membucket, hopefully zero
+ int64_t lostBytes;
+ // the number of active allocations from this bucket
+ int64_t allocs;
+ // the index of the last empty block.
+ // Should always point to `pk_memblock{ .data = ptr+head, .size=size-head }`
+ int64_t lastEmptyBlockIndex;
+ // number of pk_memblocks in the `*blocks` array
+ int64_t maxBlockCount;
+ // ptr to an array of pk_memblock to track ALL free space between ptr and ptr+sz
+ struct pk_memblock* blocks;
+ // starting point for alloc'd data
+ union {
+ char* ptr;
+ void* raw;
+ };
+ const char* description;
+ mtx_t mtx;
+ bool transient;
+};
+
+static struct pk_membucket pk_buckets[PK_MAX_BUCKET_COUNT];
+static int64_t pk_bucket_head = 0;
+
+#ifdef PK_MEMORY_DEBUGGER
+struct pk_dbg_memblock {
+ struct pk_memblock blk;
+ struct pk_membucket *bkt;
+};
+static struct pk_dbg_memblock debug_all_allocs[1024 * 1024];
+static int64_t debug_alloc_head = 0;
+static bool has_init_debug = false;
+#endif
+
+bool
+pk_memory_is_in_bucket(const void* ptr, const struct pk_membucket* bkt)
+{
+ if (ptr >= bkt->raw && (const char*)ptr < bkt->ptr + bkt->size) return true;
+ return false;
+}
+
+void
+pk_memory_debug_print()
+{
+ PK_LOGV_INF("Memory Manager printout:\nBucket count: %li\n", pk_bucket_head);
+ for (long i = 0; i < pk_bucket_head; ++i) {
+ PK_LOGV_INF("- bucket #%li\n", i);
+ PK_LOGV_INF("\tdescription: %s\n", pk_buckets[i].description);
+ PK_LOGV_INF("\tsize: %li\n", pk_buckets[i].size);
+ PK_LOGV_INF("\thead: %li\n", pk_buckets[i].head);
+ PK_LOGV_INF("\tlostBytes: %li\n", pk_buckets[i].lostBytes);
+ PK_LOGV_INF("\tallocs: %li\n", pk_buckets[i].allocs);
+ PK_LOGV_INF("\tlastEmptyBlockIndex: %li\n", pk_buckets[i].lastEmptyBlockIndex);
+ PK_LOGV_INF("\tmaxBlockCount: %li\n", pk_buckets[i].maxBlockCount);
+ PK_LOGV_INF("\tblocks: %p\n", pk_buckets[i].blocks);
+ PK_LOGV_INF("\tptr: %p\n", pk_buckets[i].ptr);
+ PK_LOGV_INF("\ttransient: %i\n", pk_buckets[i].transient);
+#ifdef PK_MEMORY_DEBUGGER
+ uint64_t count = 0;
+ for (int64_t d = 0; d < debug_alloc_head; ++d) {
+ if (debug_all_allocs[d].bkt == &pk_buckets[i] && debug_all_allocs[d].blk.size > 0) {
+ count += 1;
+ }
+ }
+ PK_LOGV_INF("\tdebug alloc count: %lu\n", count);
+ PK_LOGV_INF("\tdebug alloc last: %lu\n", debug_alloc_head);
+#endif
+ }
+}
+
+void
+pk_memory_flush()
+{
+ for (long i = pk_bucket_head - 1; i > -1; --i) {
+ if (pk_buckets[i].head != 0) break;
+ if (pk_buckets[i].transient == true) break;
+ pk_bucket_head--;
+ if (pk_buckets[i].raw == CAFE_BABE(void)) continue;
+ pk_bucket_destroy(&pk_buckets[i]);
+ }
+}
+
+void
+pk_memory_teardown_all()
+{
+ for (int64_t i = pk_bucket_head; i > 0; --i) {
+ if (pk_buckets[i - 1].ptr == nullptr) continue;
+ if (pk_buckets[i - 1].ptr == CAFE_BABE(char)) continue;
+ pk_bucket_destroy(&pk_buckets[i - 1]);
+ }
+ pk_bucket_head = 0;
+}
+
+static int64_t
+pk_bucket_create_inner(int64_t sz, bool transient, const char* description)
+{
+ assert(pk_bucket_head < PK_MAX_BUCKET_COUNT && "pkmem.h: reserved bucket count exceeded");
+#ifdef PK_MEMORY_DEBUGGER
+ if (has_init_debug == false) {
+ has_init_debug = true;
+ memset(debug_all_allocs, 0, sizeof(struct pk_dbg_memblock) * 1024 * 1024);
+ }
+#endif
+ int64_t blockCount = sz * 0.01;
+ struct pk_membucket* bkt = &pk_buckets[pk_bucket_head];
+ bkt->size = sz;
+ bkt->head = 0;
+ bkt->lostBytes = 0;
+ bkt->allocs = 0;
+ bkt->lastEmptyBlockIndex = 0;
+ bkt->maxBlockCount = blockCount < 10 ? 10 : blockCount;
+ bkt->blocks = (struct pk_memblock*)malloc(sz);
+ mtx_init(&bkt->mtx, mtx_plain);
+ assert(bkt->blocks != nullptr && "failed to allocate memory");
+ memset(bkt->blocks, 0, sz);
+ bkt->ptr = ((char*)(bkt->blocks)) + (sizeof(struct pk_memblock) * bkt->maxBlockCount);
+ size_t misalignment = (uint64_t)(bkt->ptr) % PK_MAXIMUM_ALIGNMENT;
+ if (misalignment != 0) {
+ size_t moreBlocks = misalignment / sizeof(struct pk_memblock);
+ bkt->maxBlockCount += moreBlocks;
+ bkt->ptr += (PK_MAXIMUM_ALIGNMENT - misalignment);
+ }
+ bkt->description = description;
+ bkt->transient = transient;
+ struct pk_memblock* memBlock = (struct pk_memblock*)(bkt->blocks);
+ memBlock->data = bkt->ptr;
+ memBlock->size = sz - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
+ return pk_bucket_head++;
+}
+
+struct pk_membucket*
+pk_bucket_create(const char* description, int64_t sz, bool transient)
+{
+ return &pk_buckets[pk_bucket_create_inner(sz, transient, description)];
+}
+
+void
+pk_bucket_destroy(struct pk_membucket* bkt)
+{
+ int64_t i;
+ for (i = 0; i < pk_bucket_head; ++i) {
+ if (&pk_buckets[i] == bkt) {
+ if (pk_bucket_head == i + 1)
+ pk_bucket_head--;
+ break;
+ }
+ }
+ free(bkt->blocks);
+ bkt->size = 0;
+ bkt->head = 0;
+ bkt->lostBytes = 0;
+ bkt->allocs = 0;
+ bkt->lastEmptyBlockIndex = -1;
+ bkt->maxBlockCount = 0;
+ bkt->blocks = CAFE_BABE(struct pk_memblock);
+ bkt->ptr = CAFE_BABE(char);
+ bkt->transient = false;
+ mtx_destroy(&bkt->mtx);
+#ifdef PK_MEMORY_DEBUGGER
+ for (i = debug_alloc_head; i > -1; --i) {
+ if (debug_all_allocs[i].bkt == bkt) {
+ debug_all_allocs[i].blk.data = NULL;
+ debug_all_allocs[i].blk.size = 0u;
+ }
+ }
+#endif
+}
+
+void
+pk_bucket_reset(struct pk_membucket* bkt)
+{
+#ifdef PK_MEMORY_DEBUGGER
+ int64_t i;
+#endif
+ if (bkt->transient != true) {
+ PK_LOG_ERR("WARNING: pk_bucket_reset called on non-transient pk_membucket\n");
+ }
+ bkt->head = 0;
+ bkt->lostBytes = 0;
+ bkt->allocs = 0;
+ bkt->lastEmptyBlockIndex = 0;
+ bkt->blocks->data = bkt->ptr;
+ bkt->blocks->size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
+#ifdef PK_MEMORY_DEBUGGER
+ for (i = debug_alloc_head; i > -1; --i) {
+ if (debug_all_allocs[i].bkt == bkt) {
+ debug_all_allocs[i].blk.data = NULL;
+ debug_all_allocs[i].blk.size = 0u;
+ }
+ }
+#endif
+}
+
+void
+pk_bucket_insert_block(struct pk_membucket* bkt, const struct pk_memblock* block)
+{
+ int64_t index = bkt->lastEmptyBlockIndex;
+ while (index >= 0) {
+ struct pk_memblock* b = &bkt->blocks[index];
+ struct pk_memblock* nb = &bkt->blocks[index + 1];
+ if (b->data < block->data) {
+ break;
+ }
+ nb->data = b->data;
+ nb->size = b->size;
+ index -= 1;
+ }
+ struct pk_memblock *b = &bkt->blocks[index + 1];
+ b->data = block->data;
+ b->size = block->size;
+ bkt->lastEmptyBlockIndex += 1;
+}
+
+void
+pk_bucket_collapse_empty_blocks(struct pk_membucket* bkt) {
+ for (int64_t i = bkt->lastEmptyBlockIndex; i > -1; --i) {
+ struct pk_memblock* block = &bkt->blocks[i];
+ if (block->size == 0 && i == bkt->lastEmptyBlockIndex) {
+ block->data = nullptr;
+ bkt->lastEmptyBlockIndex -= 1;
+ continue;
+ }
+ if (block->size > 0) {
+ continue;
+ }
+ for (int64_t k = i; k < bkt->lastEmptyBlockIndex; ++k) {
+ bkt->blocks[k].data = bkt->blocks[k + 1].data;
+ bkt->blocks[k].size = bkt->blocks[k + 1].size;
+ }
+ bkt->lastEmptyBlockIndex -= 1;
+ }
+}
+
+void*
+pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt)
+{
+#ifdef PK_MEMORY_FORCE_MALLOC
+ return malloc(sz);
+#endif
+ if (sz == 0) return nullptr;
+ size_t calculatedAlignment = alignment < PK_MINIMUM_ALIGNMENT ? PK_MINIMUM_ALIGNMENT : alignment;
+ size_t misalignment = 0;
+ struct pk_memblock* prevBlock = nullptr;
+ struct pk_memblock* block = nullptr;
+ struct pk_memblock* nextBlock = nullptr;
+ void* data = nullptr;
+ mtx_lock(&bkt->mtx);
+ for (int64_t i = 0; i <= bkt->lastEmptyBlockIndex; ++i) {
+ struct pk_memblock* blk = &bkt->blocks[i];
+ misalignment = (size_t)(blk->data) % calculatedAlignment;
+ misalignment = (calculatedAlignment - misalignment) % calculatedAlignment;
+ if (blk->size >= sz + misalignment) {
+ block = blk;
+ if (i < bkt->lastEmptyBlockIndex && bkt->blocks[i + 1].data == block->data + block->size) {
+ nextBlock = &bkt->blocks[i + 1];
+ }
+ if (i > 0 && i != bkt->lastEmptyBlockIndex && (bkt->blocks[i-1].data + bkt->blocks[i-1].size) == block->data) {
+ prevBlock = &bkt->blocks[i - 1];
+ }
+ break;
+ }
+ }
+ if (block == nullptr) {
+ mtx_unlock(&bkt->mtx);
+ assert(block != nullptr && "memory corruption: not enough space in chosen bkt");
+ }
+ data = block->data + misalignment;
+#ifdef PK_MEMORY_DEBUGGER
+ bool handled = bkt->transient;
+ if (handled == false) {
+ for (int64_t i = 0; i < debug_alloc_head; ++i) {
+ struct pk_dbg_memblock* mb = &debug_all_allocs[i];
+ if (mb->bkt != NULL) continue;
+ assert((mb->blk.size == 0 || (void*)(mb->blk.data) != data) && "mem address alloc'd twice!");
+ if (mb->blk.size == 0) {
+ mb->blk.data = (char*)(data);
+ mb->blk.size = sz;
+ mb->bkt = bkt;
+ handled = true;
+ break;
+ }
+ }
+ }
+ if (handled == false) {
+ debug_all_allocs[debug_alloc_head++] = (struct pk_dbg_memblock){
+ .blk = (struct pk_memblock) {
+ .data = (char*)(data),
+ .size = sz,
+ },
+ .bkt = bkt,
+ };
+ }
+#endif
+ int64_t afterSize = block->size - (misalignment + sz);
+ if (block->data == bkt->ptr + bkt->head) {
+ bkt->head += (sz + misalignment);
+ }
+ if (afterSize > 0 && nextBlock == nullptr) {
+ struct pk_memblock newBlock;
+ memset(&newBlock, 0, sizeof(struct pk_memblock));
+ newBlock.data = block->data + misalignment + sz;
+ newBlock.size = afterSize;
+ pk_bucket_insert_block(bkt, &newBlock);
+ }
+ if (prevBlock == nullptr && nextBlock == nullptr) {
+ block->size = misalignment;
+ } else if (nextBlock != nullptr) {
+ block->size = misalignment;
+ nextBlock->data -= afterSize;
+ nextBlock->size += afterSize;
+ } else if (prevBlock != nullptr) {
+ prevBlock->size += misalignment;
+ block->data += misalignment + sz;
+ block->size = 0; // if you make it here, afterSize has already been handled
+ }
+ bkt->allocs++;
+ assert(data >= bkt->raw && "allocated data is before bucket data");
+ assert((char*)data <= bkt->ptr + bkt->size && "allocated data is after bucket data");
+ pk_bucket_collapse_empty_blocks(bkt);
+#ifdef PK_MEMORY_DEBUGGER
+ if (!bkt->transient) {
+ int64_t debug_tracked_alloc_size = 0;
+ int64_t debug_bucket_alloc_size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
+ for (int64_t i = 0; i < debug_alloc_head; ++i) {
+ if (debug_all_allocs[i].bkt != bkt) continue;
+ debug_tracked_alloc_size += debug_all_allocs[i].blk.size;
+ }
+ for (int64_t i = 0; i <= bkt->lastEmptyBlockIndex; ++i) {
+ debug_bucket_alloc_size -= bkt->blocks[i].size;
+ }
+ assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!");
+ }
+#endif
+ mtx_unlock(&bkt->mtx);
+ return data;
+}
+
+void*
+pk_new_base(size_t sz, size_t alignment)
+{
+ struct pk_membucket* bkt = nullptr;
+ for (long i = 0; i < pk_bucket_head; ++i) {
+ if (pk_buckets[i].transient == false && pk_buckets[i].size - pk_buckets[i].head > sz + PK_MAXIMUM_ALIGNMENT) {
+ bkt = &pk_buckets[i];
+ break;
+ }
+ }
+ if (bkt == nullptr) {
+ bkt = &pk_buckets[pk_bucket_create_inner(PK_DEFAULT_BUCKET_SIZE, false, "pk_bucket internally created")];
+ }
+ return pk_new_bkt(sz, alignment, bkt);
+}
+
+void*
+pk_new(size_t sz, size_t alignment, struct pk_membucket* bkt)
+{
+ if (bkt != NULL) return pk_new_bkt(sz, alignment, bkt);
+ return pk_new_base(sz, alignment);
+}
+
+void
+pk_delete_bkt(const void* ptr, size_t sz, struct pk_membucket* bkt)
+{
+#ifdef PK_MEMORY_FORCE_MALLOC
+ free((void *)ptr);
+ return;
+#endif
+ mtx_lock(&bkt->mtx);
+ assert(ptr >= bkt->raw && (char*)ptr < bkt->ptr + bkt->size && "pointer not in memory bucket range");
+ assert(sz > 0 && "attempted to free pointer of size 0");
+#ifdef PK_MEMORY_DEBUGGER
+ bool found = bkt->transient;
+ if (found == false) {
+ for (int64_t i = debug_alloc_head - 1; i > -1; --i) {
+ struct pk_dbg_memblock* mb = &debug_all_allocs[i];
+ if (mb->bkt != bkt) continue;
+ if (mb->blk.size == 0) continue;
+ if ((void*)(mb->blk.data) == ptr) {
+ assert(mb->blk.size == sz && "[pkmem.h] incorrect free size");
+ mb->blk.size = 0;
+ mb->bkt = NULL;
+ found = true;
+ if (i == (debug_alloc_head - 1)) {
+ debug_alloc_head--;
+ }
+ break;
+ }
+ }
+ }
+ assert(found && "[pkmem.h] double free or invalid ptr");
+#endif
+ bkt->allocs--;
+ if (bkt->allocs == 0) {
+ bkt->head = 0;
+ bkt->lastEmptyBlockIndex = 0;
+ bkt->blocks[0].data = bkt->ptr;
+ bkt->blocks[0].size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
+ mtx_unlock(&bkt->mtx);
+ return;
+ }
+ char* afterPtr = ((char*)(ptr))+sz;
+ struct pk_memblock* beforeBlk = nullptr;
+ struct pk_memblock* afterBlk = nullptr;
+ for (int64_t i = bkt->lastEmptyBlockIndex; i > 0; --i) {
+ if (bkt->blocks[i-1].data + bkt->blocks[i-1].size == ptr) {
+ beforeBlk = &bkt->blocks[i-1];
+ }
+ if (bkt->blocks[i].data == afterPtr) {
+ afterBlk = &bkt->blocks[i];
+ break;
+ }
+ if (bkt->blocks[i-1].data < (char*)ptr) {
+ break;
+ }
+ }
+ if (ptr == bkt->ptr && afterBlk == nullptr && bkt->blocks[0].data == afterPtr) {
+ afterBlk = &bkt->blocks[0];
+ }
+ if (afterBlk != nullptr && afterBlk->data == bkt->ptr + bkt->head) {
+ bkt->head -= sz;
+ if (beforeBlk != nullptr) {
+ bkt->head -= beforeBlk->size;
+ }
+ }
+ if (beforeBlk == nullptr && afterBlk == nullptr) {
+ struct pk_memblock newBlock;
+ memset(&newBlock, 0, sizeof(struct pk_memblock));
+ newBlock.data = (char*)ptr;
+ newBlock.size = sz;
+ pk_bucket_insert_block(bkt, &newBlock);
+ } else if (beforeBlk != nullptr && afterBlk != nullptr) {
+ beforeBlk->size += sz + afterBlk->size;
+ afterBlk->size = 0;
+ } else if (beforeBlk != nullptr) {
+ beforeBlk->size += sz;
+ } else if (afterBlk != nullptr) {
+ afterBlk->data -= sz;
+ afterBlk->size += sz;
+ }
+ pk_bucket_collapse_empty_blocks(bkt);
+#ifdef PK_MEMORY_DEBUGGER
+ if (!bkt->transient) {
+ int64_t debug_tracked_alloc_size = 0;
+ int64_t debug_bucket_alloc_size = bkt->size - (sizeof(struct pk_memblock) * bkt->maxBlockCount);
+ for (int64_t i = 0; i < debug_alloc_head; ++i) {
+ if (debug_all_allocs[i].bkt != bkt) continue;
+ debug_tracked_alloc_size += debug_all_allocs[i].blk.size;
+ }
+ for (int64_t i = 0; i <= bkt->lastEmptyBlockIndex; ++i) {
+ debug_bucket_alloc_size -= bkt->blocks[i].size;
+ }
+ assert(debug_tracked_alloc_size == debug_bucket_alloc_size && "allocation size mismatch!");
+ }
+#endif
+ mtx_unlock(&bkt->mtx);
+}
+
+void
+pk_delete_base(const void* ptr, size_t sz)
+{
+ struct pk_membucket* bkt = nullptr;
+ for (long i = 0; i < pk_bucket_head; ++i) {
+ bkt = &pk_buckets[i];
+ if (ptr >= bkt->raw && (char*)ptr < bkt->ptr + bkt->size) break;
+ }
+ assert(bkt != nullptr && "failed to determine correct memory bucket");
+ pk_delete_bkt(ptr, sz, bkt);
+}
+
+void
+pk_delete(const void* ptr, size_t sz, struct pk_membucket* bkt)
+{
+ if (bkt != NULL) return pk_delete_bkt(ptr, sz, bkt);
+ return pk_delete_base(ptr, sz);
+}
+
+#endif /* PK_IMPL_MEM */
+#ifndef PK_STR_H
+#define PK_STR_H
+
+#include <stdint.h>
+
+struct pk_str {
+ char *val;
+ uint32_t length;
+ uint32_t reserved;
+};
+struct pk_cstr {
+ const char *val;
+ uint32_t length;
+ uint32_t reserved;
+};
+
+struct pk_str cstring_to_pk_str(char *s);
+struct pk_cstr cstring_to_pk_cstr(const char *s);
+struct pk_str pk_cstr_to_pk_str(const struct pk_cstr *s);
+struct pk_cstr pk_str_to_pk_cstr(const struct pk_str *s);
+int pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs);
+int pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs);
+
+#endif /* PK_STR_H */
+
+#ifdef PK_IMPL_STR
+
+#include <string.h>
+
+struct pk_str
+cstring_to_pk_str(char *s)
+{
+ return (struct pk_str) {
+ .val = s,
+ .length = (uint32_t)(strlen(s)),
+ .reserved = 0,
+ };
+}
+
+struct pk_cstr
+cstring_to_pk_cstr(const char *s)
+{
+ return (struct pk_cstr) {
+ .val = s,
+ .length = (uint32_t)(strlen(s)),
+ .reserved = 0,
+ };
+}
+
+struct pk_str
+pk_cstr_to_pk_str(const struct pk_cstr *s)
+{
+ return (struct pk_str) {
+ .val = (char *)(s->val),
+ .length = s->length,
+ .reserved = s->reserved,
+ };
+}
+
+struct pk_cstr
+pk_str_to_pk_cstr(const struct pk_str *s)
+{
+ return (struct pk_cstr) {
+ .val = (char *)(s->val),
+ .length = s->length,
+ .reserved = s->reserved,
+ };
+}
+
+int
+pk_compare_str(const struct pk_str *lhs, const struct pk_str *rhs)
+{
+ return strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length));
+}
+
+int
+pk_compare_cstr(const struct pk_cstr *lhs, const struct pk_cstr *rhs)
+{
+ return strncmp(lhs->val, rhs->val, PK_MIN(lhs->length, rhs->length));
+}
+
+#endif /* PK_IMPL_STR */
+#ifndef PK_EV_H
+#define PK_EV_H
+
+#include <stdint.h>
+
+typedef uint64_t pk_ev_mgr_id_T;
+typedef uint64_t pk_ev_id_T;
+
+// note: pk_ev_init() is NOT thread-safe
+void pk_ev_init();
+// note: pk_ev_teardown() is NOT thread-safe
+void pk_ev_teardown();
+
+const pk_ev_mgr_id_T pk_ev_create_mgr();
+void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr);
+
+typedef void (pk_ev_cb_fn)(void *user_event_data, void *user_cb_data, void *user_ev_data);
+
+const pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data);
+bool pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data);
+void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data);
+
+#endif /* PK_EV_H */
+
+#ifdef PK_IMPL_EV
+
+#include <assert.h>
+#include <stdatomic.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <threads.h>
+
+#ifndef PK_EV_INIT_MGR_COUNT
+# define PK_EV_INIT_MGR_COUNT 1
+#endif
+
+#ifndef PK_EV_INIT_EV_COUNT
+# define PK_EV_INIT_EV_COUNT 16
+#endif
+
+#ifndef PK_EV_INIT_CB_COUNT
+# define PK_EV_INIT_CB_COUNT 8
+#endif
+
+#ifndef PK_EV_GROW_RATIO
+# define PK_EV_GROW_RATIO 1.5
+#endif
+
+struct pk_ev_cb {
+ pk_ev_cb_fn *cb;
+ void *user_cb_data;
+};
+
+struct pk_ev {
+ struct pk_ev_cb *ev_cbs;
+ void *user_ev_data;
+ atomic_uint_fast8_t n_ev_cbs;
+};
+
+struct pk_ev_mgr {
+ struct pk_ev *ev;
+ atomic_uint_fast8_t n_ev;
+ atomic_uint_fast8_t rn_ev;
+ atomic_uint_fast8_t rn_cb;
+};
+
+struct pk_ev_mstr {
+ atomic_uint_fast64_t flg_mgrs;
+ atomic_uint_fast64_t rn_mgrs;
+ struct pk_ev_mgr **mgrs;
+ mtx_t *mtxs;
+};
+
+struct pk_ev_mstr pk_ev_mstr;
+
+void
+pk_ev_init()
+{
+ int i;
+ pk_ev_mstr.mgrs = (struct pk_ev_mgr **)malloc(sizeof(void *) * PK_EV_INIT_MGR_COUNT);
+ pk_ev_mstr.mtxs = (mtx_t*)malloc(sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT);
+ memset(pk_ev_mstr.mgrs, 0, sizeof(void *) * PK_EV_INIT_MGR_COUNT);
+ memset(pk_ev_mstr.mtxs, 0, sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT);
+ for (i = 0; i < PK_EV_INIT_MGR_COUNT; ++i) {
+ mtx_init(&pk_ev_mstr.mtxs[i], mtx_plain);
+ }
+ atomic_store(&pk_ev_mstr.flg_mgrs, 0);
+ atomic_store(&pk_ev_mstr.rn_mgrs, PK_EV_INIT_MGR_COUNT);
+}
+
+void
+pk_ev_teardown()
+{
+ int i;
+ for (i = 0; i < pk_ev_mstr.rn_mgrs; ++i) {
+ if ((atomic_load(&pk_ev_mstr.flg_mgrs) & (1ull << i)) == 0) continue;
+ mtx_lock(&pk_ev_mstr.mtxs[i]);
+ free(pk_ev_mstr.mgrs[i]);
+ pk_ev_mstr.mgrs[i] = NULL;
+ mtx_unlock(&pk_ev_mstr.mtxs[i]);
+ mtx_destroy(&pk_ev_mstr.mtxs[i]);
+ }
+ free(pk_ev_mstr.mgrs);
+ free(pk_ev_mstr.mtxs);
+ pk_ev_mstr.mgrs = NULL;
+ pk_ev_mstr.mtxs = NULL;
+}
+
+static struct pk_ev_mgr*
+pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
+{
+ assert(ev_count < 0x100);
+ assert(cb_count < 0x100);
+ int i;
+ struct pk_ev *ev;
+ size_t sz = sizeof(struct pk_ev_mgr) + ((sizeof(struct pk_ev) * ev_count)) + (sizeof (struct pk_ev_cb) * ev_count * cb_count);
+ size_t sz_ev = (sizeof(struct pk_ev_cb) * cb_count);
+ size_t sz_evs = sizeof(struct pk_ev) * ev_count;
+
+ struct pk_ev_mgr *mgr = (struct pk_ev_mgr*)malloc(sz);
+ if (mgr == NULL) goto early_exit;
+
+ memset(mgr, 0, sz);
+ mgr->ev = (struct pk_ev*)(((char *)mgr) + sizeof(struct pk_ev_mgr));
+ atomic_init(&mgr->rn_ev, ev_count);
+ atomic_init(&mgr->rn_cb, cb_count);
+ atomic_init(&mgr->n_ev, 0);
+ for (i = 0; i < mgr->rn_ev; ++i) {
+ ev = &mgr->ev[i];
+ atomic_init(&ev->n_ev_cbs, 0);
+ ev->ev_cbs = (struct pk_ev_cb*)(((char *)mgr) + sizeof(struct pk_ev_mgr) + sz_evs + (sz_ev * i));
+ }
+
+early_exit:
+ return mgr;
+}
+
+static void
+pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr)
+{
+ int i;
+ struct pk_ev *ev_old;
+ struct pk_ev *ev;
+ atomic_store(&mgr->n_ev, atomic_load(&old->n_ev));
+ for (i = 0; i < old->n_ev; ++i) {
+ ev_old = &old->ev[i];
+ ev = &mgr->ev[i];
+ memcpy(ev->ev_cbs, ev_old->ev_cbs, sizeof(struct pk_ev_cb) * atomic_load(&ev_old->n_ev_cbs));
+ atomic_store(&ev->n_ev_cbs, atomic_load(&ev_old->n_ev_cbs));
+ }
+}
+
+const pk_ev_mgr_id_T
+pk_ev_create_mgr()
+{
+ uint64_t i;
+ pk_ev_mgr_id_T flg;
+ pk_ev_mgr_id_T flg_new;
+ pk_ev_mgr_id_T id;
+ struct pk_ev_mgr *mgr = pk_ev_inner_ev_mgr_create(PK_EV_INIT_EV_COUNT, PK_EV_INIT_CB_COUNT);
+ if (mgr == NULL) return -1;
+start:
+ flg = atomic_load(&pk_ev_mstr.flg_mgrs);
+ while (1) {
+ flg_new = flg;
+ for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) {
+ if ((flg & (1ull << i)) == 0) break;
+ }
+ if (i == atomic_load(&pk_ev_mstr.rn_mgrs)) {
+ goto recreate;
+ }
+ id = i;
+ flg_new |= (1ull << i);
+ if (atomic_compare_exchange_strong(&pk_ev_mstr.flg_mgrs, &flg, flg_new)) break;
+ thrd_yield();
+ }
+ pk_ev_mstr.mgrs[id] = mgr;
+ return id;
+recreate:
+ // TODO recreate mgr, out of space
+ assert(1 == 0 && "[pkev.h] Out of mgr space.");
+ exit(1);
+ goto start;
+}
+
+void
+pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr)
+{
+ assert(evmgr >= 0);
+ mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
+ free(pk_ev_mstr.mgrs[evmgr]);
+ pk_ev_mstr.mgrs[evmgr] = NULL;
+ mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+}
+
+const pk_ev_id_T
+pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data)
+{
+ assert(evmgr < 64);
+ uint64_t new_size;
+ pk_ev_id_T id;
+ struct pk_ev_mgr *mgr;
+ mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
+ if (pk_ev_mstr.mgrs[evmgr]->n_ev == pk_ev_mstr.mgrs[evmgr]->rn_ev) {
+ new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_ev * PK_EV_GROW_RATIO));
+ if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_ev) {
+ PK_LOG_ERR("[pkev.h] need more room, but failed to grow ev count.\n");
+ mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ exit(1);
+ }
+ mgr = pk_ev_inner_ev_mgr_create(new_size, pk_ev_mstr.mgrs[evmgr]->rn_cb);
+ pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
+ free(pk_ev_mstr.mgrs[evmgr]);
+ pk_ev_mstr.mgrs[evmgr] = mgr;
+ }
+ id = pk_ev_mstr.mgrs[evmgr]->n_ev++;
+ pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data;
+ mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ return id;
+}
+
+bool
+pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data)
+{
+ assert(evmgr < 64);
+ uint64_t new_size;
+ struct pk_ev_mgr *mgr;
+ uint8_t cb_index;
+ mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
+ if (pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
+ new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO));
+ if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
+ PK_LOG_ERR("[pkev.h] need more room, but failed to grow cb count.\n");
+ mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ exit(1);
+ }
+ mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev, new_size);
+ pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
+ free(pk_ev_mstr.mgrs[evmgr]);
+ pk_ev_mstr.mgrs[evmgr] = mgr;
+ }
+ cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs++;
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb;
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data;
+ mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ return true;
+}
+
+void
+pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data)
+{
+ assert(evmgr < 64);
+ uint8_t i;
+ for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs; ++i) {
+ (*pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb)(
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].user_ev_data,
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].user_cb_data,
+ user_emit_data);
+ }
+}
+
+#endif /* PK_IMPL_EV */
+#ifndef PK_PKARR_H
+#define PK_PKARR_H
+
+#include <stdint.h>
+
+struct pk_arr {
+ uint32_t next;
+ uint32_t reserved;
+ uint32_t stride;
+ uint32_t alignment;
+ struct pk_membucket *bkt;
+ void *data;
+};
+
+typedef bool(pk_arr_item_compare)(void *user_data, void *item);
+
+void pk_arr_clear(struct pk_arr *arr);
+void pk_arr_reset(struct pk_arr *arr);
+void pk_arr_reserve(struct pk_arr *arr, uint32_t count);
+void pk_arr_resize(struct pk_arr *arr, uint32_t count);
+void pk_arr_move_to_back(struct pk_arr *arr, uint32_t index);
+void pk_arr_append(struct pk_arr *arr, void *data);
+void pk_arr_remove_at(struct pk_arr *arr, uint32_t index);
+uint32_t pk_arr_find_first_index(struct pk_arr *arr, void *user_data, pk_arr_item_compare *fn);
+
+#endif /* PK_PKARR_H */
+#ifdef PK_IMPL_ARR
+
+#include <string.h>
+#ifndef PK_ARR_GROW_RATIO
+#define PK_ARR_GROW_RATIO 1.5
+#endif
+#ifndef PK_ARR_INITIAL_COUNT
+#define PK_ARR_INITIAL_COUNT 16
+#endif
+
+void
+pk_arr_clear(struct pk_arr *arr)
+{
+ arr->next = 0;
+}
+
+void
+pk_arr_reset(struct pk_arr *arr)
+{
+ if (arr->data != NULL) pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
+ arr->data = NULL;
+ arr->next = 0;
+ arr->reserved = 0;
+}
+
+void
+pk_arr_reserve(struct pk_arr *arr, uint32_t count)
+{
+ if (arr->reserved >= count) return;
+ void *new_data = pk_new(arr->stride * count, arr->alignment, arr->bkt);
+ if (arr->data != NULL) {
+ if (arr->next != 0) {
+ memcpy(new_data, arr->data, arr->stride * arr->reserved);
+ }
+ pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
+ }
+ arr->reserved = count;
+ arr->data = new_data;
+}
+
+void
+pk_arr_resize(struct pk_arr *arr, uint32_t count)
+{
+ pk_arr_reserve(arr, count);
+ arr->next = count;
+}
+
+void
+pk_arr_move_to_back(struct pk_arr *arr, uint32_t index)
+{
+ if (arr->reserved == 0) return;
+ if (arr->next <= 1) return;
+#ifdef PK_ARR_MOVE_IN_PLACE
+ uint32_t i, ii;
+ uint8_t *target = (uint8_t *)pk_new(arr->stride, arr->alignment, arr->bkt);
+ uint8_t *buffer = (uint8_t *)arr->data;
+ for (ii = 0, i = arr->stride * index; ii < arr->stride; ++ii, ++i) {
+ target[ii] = buffer[i];
+ }
+ for (i = arr->stride * index; i < (arr->stride * (arr->next - 1)); ++i) {
+ buffer[i] = buffer[i + arr->stride];
+ }
+ for (ii = 0, i = arr->stride * (arr->next - 1); ii < arr->stride; ++ii, ++i) {
+ buffer[i] = target[ii];
+ }
+ pk_delete(target, arr->stride, arr->bkt);
+#else
+ char *new_data = (char *)pk_new(arr->stride * arr->reserved, arr->alignment, arr->bkt);
+ if (index > 0) {
+ memcpy(new_data, arr->data, arr->stride * index);
+ }
+ memcpy(
+ new_data + (arr->stride * (arr->next - 1)),
+ ((char *)arr->data) + (arr->stride * index),
+ arr->stride);
+ memcpy(
+ new_data + (arr->stride * index),
+ ((char *)arr->data) + (arr->stride * (index + 1)),
+ arr->stride * (arr->next - index - 1));
+ pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
+ arr->data = (void *)new_data;
+#endif
+}
+
+void
+pk_arr_append(struct pk_arr *arr, void *data)
+{
+ if (arr->reserved == arr->next) {
+ uint32_t new_count = PK_MAX(arr->reserved == 0 ? PK_ARR_INITIAL_COUNT : arr->reserved * PK_ARR_GROW_RATIO, arr->reserved + 1);
+ void *new_data = pk_new(arr->stride * new_count, arr->alignment, arr->bkt);
+ if (arr->data != NULL) {
+ memcpy(new_data, arr->data, arr->stride * arr->reserved);
+ pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
+ }
+ arr->data = new_data;
+ arr->reserved = new_count;
+ }
+ memcpy(((char *)arr->data) + (arr->stride * arr->next), data, arr->stride);
+ arr->next += 1;
+ return;
+}
+
+void
+pk_arr_remove_at(struct pk_arr *arr, uint32_t index)
+{
+ if (arr->reserved == 0) return;
+ if (index == arr->next - 1) {
+ arr->next -= 1;
+ return;
+ }
+#ifdef PK_ARR_MOVE_IN_PLACE
+ uint32_t i;
+ uint8_t *buffer = (uint8_t *)arr->data;
+ for (i = arr->stride * index; i < (arr->stride * (arr->next - 1)); ++i) {
+ buffer[i] = buffer[i + arr->stride];
+ }
+#else
+ char *new_data = (char *)pk_new(arr->stride * arr->reserved, arr->alignment, arr->bkt);
+ if (index > 0) {
+ memcpy(new_data, arr->data, arr->stride * index);
+ }
+ memcpy(
+ new_data + (arr->stride * index),
+ ((char *)arr->data) + (arr->stride * (index + 1)),
+ arr->stride * (arr->next - index - 1));
+ pk_delete(arr->data, arr->stride * arr->reserved, arr->bkt);
+ arr->data = (void *)new_data;
+#endif
+ arr->next -= 1;
+}
+
+uint32_t
+pk_arr_find_first_index(struct pk_arr *arr, void *user_data, pk_arr_item_compare *fn)
+{
+ uint32_t i;
+ char *char_data = (char *)arr->data;
+ for (i = 0; i < arr->next; ++i) {
+ if (fn(user_data, char_data + (arr->stride * i))) return i;
+ }
+ return -1;
+}
+
+#endif /* PK_IMPL_ARR */
+#ifndef PK_PK_STN_H
+#define PK_PK_STN_H
+
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+enum PK_STN_RES {
+ PK_STN_RES_SUCCESS,
+ PK_STN_RES_OVERFLOW,
+ PK_STN_RES_UNDERFLOW,
+ PK_STN_RES_INCONVERTIBLE
+};
+
+enum PK_STN_RES pk_stn_int64_t(int64_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_uint64_t(uint64_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_int32_t(int32_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_uint32_t(uint32_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_int16_t(int16_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_uint16_t(uint16_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_int8_t(int8_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_uint8_t(uint8_t *i, char const *s, int base);
+enum PK_STN_RES pk_stn_float(float *f, char const *s);
+enum PK_STN_RES pk_stn_double(double *d, char const *s);
+enum PK_STN_RES pk_stn_float_e(float *f, char const *s, char **pEnd);
+enum PK_STN_RES pk_stn_double_e(double *d, char const *s, char **pEnd);
+
+#if defined(__cplusplus)
+
+template <typename T>
+enum PK_STN_RES pk_stn(T *n, char const *s, int base = 0)
+{
+ if constexpr(std::is_same<T, int64_t>::value) {
+ return pk_stn_int64_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, uint64_t>::value) {
+ return pk_stn_uint64_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, int32_t>::value) {
+ return pk_stn_int32_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, uint32_t>::value) {
+ return pk_stn_uint32_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, int16_t>::value) {
+ return pk_stn_int16_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, uint16_t>::value) {
+ return pk_stn_uint16_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, int8_t>::value) {
+ return pk_stn_int8_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, uint8_t>::value) {
+ return pk_stn_uint8_t(n, s, base);
+ }
+ if constexpr(std::is_same<T, float>::value) {
+ return pk_stn_float(n, s);
+ }
+ if constexpr(std::is_same<T, double>::value) {
+ return pk_stn_double(n, s);
+ }
+ return (PK_STN_RES)-1;
+}
+
+#endif /* defined(__cplusplus) */
+
+#endif /* PK_PK_STN_H */
+
+#ifdef PK_IMPL_STN
+
+enum PK_STN_RES
+pk_stn_int64_t(int64_t *i, char const *s, int base)
+{
+ char *end;
+ long long l;
+ errno = 0;
+ l = strtoll(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == LLONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ *i = l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_uint64_t(uint64_t *i, char const *s, int base)
+{
+ char *end;
+ unsigned long long l;
+ errno = 0;
+ l = strtoull(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == ULLONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ *i = l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_int32_t(int32_t *i, char const *s, int base)
+{
+ char *end;
+ long l;
+ errno = 0;
+ l = strtol(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == LONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ if (l > INT32_MAX) return PK_STN_RES_OVERFLOW;
+ if (l < INT32_MIN) return PK_STN_RES_UNDERFLOW;
+ *i = (int32_t)l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_uint32_t(uint32_t *i, char const *s, int base)
+{
+ char *end;
+ unsigned long l;
+ errno = 0;
+ l = strtoul(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == ULONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ if (l > UINT32_MAX) return PK_STN_RES_OVERFLOW;
+ *i = (uint32_t)l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_int16_t(int16_t *i, char const *s, int base)
+{
+ char *end;
+ long l;
+ errno = 0;
+ l = strtol(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == LONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ if (l > INT16_MAX) return PK_STN_RES_OVERFLOW;
+ if (l < INT16_MIN) return PK_STN_RES_UNDERFLOW;
+ *i = (int16_t)l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_uint16_t(uint16_t *i, char const *s, int base)
+{
+ char *end;
+ unsigned long l;
+ errno = 0;
+ l = strtoul(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == ULONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ if (l > UINT16_MAX) return PK_STN_RES_OVERFLOW;
+ *i = (uint16_t)l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_int8_t(int8_t *i, char const *s, int base)
+{
+ char *end;
+ long l;
+ errno = 0;
+ l = strtol(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == LONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ if (l > INT8_MAX) return PK_STN_RES_OVERFLOW;
+ if (l < INT8_MIN) return PK_STN_RES_UNDERFLOW;
+ *i = (int8_t)l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_uint8_t(uint8_t *i, char const *s, int base)
+{
+ char *end;
+ unsigned long l;
+ errno = 0;
+ l = strtoul(s, &end, base);
+ if (errno == ERANGE) {
+ if (l == ULONG_MAX) return PK_STN_RES_OVERFLOW;
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || *end != '\0') {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ if (l > UINT8_MAX) return PK_STN_RES_OVERFLOW;
+ *i = (uint8_t)l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_float(float *f, char const *s)
+{
+ char *end;
+ return pk_stn_float_e(f, s, &end);
+}
+
+enum PK_STN_RES
+pk_stn_double(double *d, char const *s)
+{
+ char *end;
+ return pk_stn_double_e(d, s, &end);
+}
+
+enum PK_STN_RES
+pk_stn_float_e(float *f, char const *s, char **pEnd)
+{
+ float l;
+ errno = 0;
+ l = strtof(s, pEnd);
+ if (errno == ERANGE && l == HUGE_VALF) {
+ return PK_STN_RES_OVERFLOW;
+ }
+ if (errno == ERANGE && l == -HUGE_VALF) {
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || s == *pEnd) {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ *f = l;
+ return PK_STN_RES_SUCCESS;
+}
+
+enum PK_STN_RES
+pk_stn_double_e(double *d, char const *s, char **pEnd)
+{
+ double l;
+ errno = 0;
+ l = strtod(s, pEnd);
+ if (errno == ERANGE && l == HUGE_VAL) {
+ return PK_STN_RES_OVERFLOW;
+ }
+ if (errno == ERANGE && l == -HUGE_VAL) {
+ return PK_STN_RES_UNDERFLOW;
+ }
+ if (*s == '\0' || s == *pEnd) {
+ return PK_STN_RES_INCONVERTIBLE;
+ }
+ *d = l;
+ return PK_STN_RES_SUCCESS;
+}
+
+#endif /* PK_IMPL_STN */
+#ifndef PK_PKTMR_H
+#define PK_PKTMR_H
+
+#include <time.h>
+
+/* 2024-12-17 JCB
+ * I have read that in more recent Linux kernels, _MONOTONIC and _REALTIME
+ * do not require syscalls, while all of the other calls can.
+ * In testing on my personal machine, this seems to hold true. Using
+ * CLOCK_PROCESS_CPUTIME_ID consistently elapsed thousands of nanoseconds,
+ * even with no work between sequential _start() and _stop() calls.
+ * Meanwhile, the same test with _MONOTONIC elapsed only tens of nanoseconds.
+ * Consider replacing explicit usage with a define for more user control.
+ */
+
+/* struct pk_tmr */
+struct pk_tmr {
+ struct timespec b; // begin
+ struct timespec e; // end
+};
+
+#define pk_tmr_start(tmr) { clock_gettime(CLOCK_MONOTONIC, &tmr.b); }
+#define pk_tmr_stop(tmr) { clock_gettime(CLOCK_MONOTONIC, &tmr.e); }
+// elapsed time in milliseconds, as a double
+#define pk_tmr_duration_double(tmr) ((1000.0 * tmr.e.tv_sec + 1e-6 * tmr.e.tv_nsec) - (1000.0 * tmr.b.tv_sec + 1e-6 * tmr.b.tv_nsec))
+// elapsed time in nanoseconds, as a uint64_t
+#define pk_tmr_duration_nano(tmr) ((((uint64_t)tmr.e.tv_sec * (uint64_t)1000000000) + (uint64_t)tmr.e.tv_nsec) - (((uint64_t)tmr.b.tv_sec * (uint64_t)1000000000) + (uint64_t)tmr.b.tv_nsec))
+
+#endif /* PK_PKTMR_H */
+#endif /* PK_SINGLE_HEADER_FILE_H */