author    Jonathan Bradley <jcb@pikum.xyz>    2025-03-14 16:02:56 -0400
committer Jonathan Bradley <jcb@pikum.xyz>    2025-03-14 16:02:56 -0400
commit    25964cf919edb944c91fdc166edc5130537d9267 (patch)
tree      e7d8154e0e36bd6cef28a97125e34bea2a337de6
parent    e17d9402663859f7fd1d83bdc5e6b7bf0489974e (diff)
pke: update pk.h to 0.4.0
-rw-r--r--    src/pk.h    91
1 file changed, 64 insertions, 27 deletions
diff --git a/src/pk.h b/src/pk.h
index 791ecc0..78b294a 100644
--- a/src/pk.h
+++ b/src/pk.h
@@ -1,7 +1,7 @@
#ifndef PK_SINGLE_HEADER_FILE_H
#define PK_SINGLE_HEADER_FILE_H
/*******************************************************************************
-* PK Single-Header-Library V0.3.0
+* PK Single-Header-Library V0.4.0
*
* Author: Jonathan Bradley
* Copyright: © 2024-2025 Jonathan Bradley
@@ -176,9 +176,18 @@
*
* Offers a set of `pk_tmr*` macros for elapsed time checking.
*
+* The following definitions (shown with defaults) can be overridden:
+* PK_TMR_CLOCK CLOCK_MONOTONIC
+*
* If you need more than one clock, I recommend calling `clock_gettime`
* manually instead of `pk_tmr_start`/`pk_tmr_stop`:
* `pk_tmr.b` is the start time.
* `pk_tmr.e` is the end time.
* You can then call the `pk_tmr_duration...` convenience macros as needed.
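*
* A minimal sketch (the clock named here is only an example):
*
*     struct pk_tmr tmr;
*     clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tmr.b);
*     // ... work to measure ...
*     clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tmr.e);
*     double ms = pk_tmr_duration_dbl_mili(tmr);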
+*
*******************************************************************************/
-#define PK_VERSION "0.3.0"
+#define PK_VERSION "0.4.0"
#ifdef PK_IMPL_ALL
# ifndef PK_IMPL_MEM_TYPES
@@ -905,8 +914,8 @@ pk_new_bkt(size_t sz, size_t alignment, struct pk_membucket* bkt)
#ifdef PK_MEMORY_FORCE_MALLOC
return malloc(sz);
#endif
- assert((bkt->size - bkt->head) > (sz + alignment -1) && "Not enough space in bucket");
if (sz == 0) return nullptr;
+ assert((bkt->size - bkt->head) > (sz + alignment -1) && "Not enough space in bucket");
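+ // zero-size requests return above, so the capacity assert only guards real allocations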
size_t i;
size_t calculatedAlignment = alignment < PK_MINIMUM_ALIGNMENT ? PK_MINIMUM_ALIGNMENT : alignment;
size_t misalignment = 0;
@@ -1244,6 +1253,8 @@ typedef uint64_t pk_ev_mgr_id_T;
typedef uint64_t pk_ev_id_T;
typedef uint64_t pk_ev_cb_id_T;
+// TODO re-think threading
+
// note: pk_ev_init() is NOT thread-safe
void pk_ev_init();
// note: pk_ev_teardown() is NOT thread-safe
@@ -1352,6 +1363,19 @@ pk_ev_teardown()
pk_ev_mstr.mtxs = NULL;
}
+size_t
+pk_ev_inner_calc_sz(uint64_t ev_count, uint64_t cb_count, size_t *sz_ev_list, size_t *sz_ev_cb_list)
+{
+ // base sizes
+ if (sz_ev_list != nullptr) *sz_ev_list = sizeof(struct pk_ev) * ev_count;
+ if (sz_ev_cb_list != nullptr) *sz_ev_cb_list = sizeof(struct pk_ev_cb) * cb_count;
+
+ size_t ret = sizeof(struct pk_ev_mgr);
+ if (sz_ev_list != nullptr) ret += *sz_ev_list;
+ if (sz_ev_cb_list != nullptr) ret += *sz_ev_cb_list * ev_count;
+ return ret;
+}
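+// For illustration, the total above lays out one allocation as:
+//   [struct pk_ev_mgr][struct pk_ev x ev_count][struct pk_ev_cb x cb_count] x ev_count
+// note: a component is left out of the total when its out-pointer is null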
+
static struct pk_ev_mgr*
pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
{
@@ -1359,9 +1383,10 @@ pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
assert(cb_count < 0x100);
uint64_t i;
struct pk_ev *ev;
- size_t sz = sizeof(struct pk_ev_mgr) + ((sizeof(struct pk_ev) * ev_count)) + (sizeof (struct pk_ev_cb) * ev_count * cb_count);
- size_t sz_ev = (sizeof(struct pk_ev_cb) * cb_count);
- size_t sz_evs = sizeof(struct pk_ev) * ev_count;
+ size_t sz_ev_list;
+ size_t sz_ev_cb_list;
+ size_t sz = pk_ev_inner_calc_sz(ev_count, cb_count, &sz_ev_list, &sz_ev_cb_list);
+ size_t sz_offset;
struct pk_ev_mgr *mgr = (struct pk_ev_mgr*)malloc(sz);
if (mgr == NULL) goto early_exit;
@@ -1370,11 +1395,14 @@ pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
atomic_init(&mgr->rn_ev, ev_count);
atomic_init(&mgr->rn_cb, cb_count);
atomic_init(&mgr->n_ev, 0);
- for (i = 0; i < mgr->rn_ev; ++i) {
+ for (i = 0; i < ev_count; ++i) {
ev = &mgr->ev[i];
atomic_init(&ev->left_ev_cbs, 0);
atomic_init(&ev->right_ev_cbs, 0);
- ev->ev_cbs = (struct pk_ev_cb*)(((char *)mgr) + sizeof(struct pk_ev_mgr) + sz_evs + (sz_ev * i));
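+ // event i's cb array sits past the mgr header, the ev array, and i earlier cb arrays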
+ sz_offset = sizeof(struct pk_ev_mgr);
+ sz_offset += sz_ev_list;
+ sz_offset += sz_ev_cb_list * i;
+ ev->ev_cbs = (struct pk_ev_cb*)(((char *)mgr) + sz_offset);
}
early_exit:
@@ -1388,10 +1416,14 @@ pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr)
struct pk_ev *ev_old;
struct pk_ev *ev;
atomic_store(&mgr->n_ev, atomic_load(&old->n_ev));
+ size_t old_sz_ev_cb_list;
for (i = 0; i < old->n_ev; ++i) {
ev_old = &old->ev[i];
ev = &mgr->ev[i];
- memcpy(ev->ev_cbs, ev_old->ev_cbs, sizeof(struct pk_ev_cb) * atomic_load(&ev_old->right_ev_cbs));
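+ // size the copy from the old cb count; also carry user data and the left/right indices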
+ pk_ev_inner_calc_sz(0, atomic_load(&ev_old->right_ev_cbs), nullptr, &old_sz_ev_cb_list);
+ ev->user_ev_data = ev_old->user_ev_data;
+ memcpy(ev->ev_cbs, ev_old->ev_cbs, old_sz_ev_cb_list);
+ atomic_store(&ev->left_ev_cbs, atomic_load(&ev_old->left_ev_cbs));
atomic_store(&ev->right_ev_cbs, atomic_load(&ev_old->right_ev_cbs));
}
}
@@ -1444,7 +1476,7 @@ pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data)
assert(evmgr < 64);
uint64_t new_size;
pk_ev_id_T id;
- struct pk_ev_mgr *mgr;
+ struct pk_ev_mgr *mgr = nullptr;
mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
if (pk_ev_mstr.mgrs[evmgr]->n_ev == pk_ev_mstr.mgrs[evmgr]->rn_ev) {
new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_ev * PK_EV_GROW_RATIO));
@@ -1459,8 +1491,8 @@ pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data)
pk_ev_mstr.mgrs[evmgr] = mgr;
}
id = pk_ev_mstr.mgrs[evmgr]->n_ev++;
- pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data;
mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data;
return id;
}
@@ -1470,28 +1502,26 @@ pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *
assert(evmgr < 64);
bool found = false;
uint64_t new_size, i;
- struct pk_ev_mgr *mgr;
+ struct pk_ev_mgr *mgr = nullptr;
pk_ev_cb_id_T cb_index;
-
- mgr = pk_ev_mstr.mgrs[evmgr];
- if (mgr == nullptr) {
- PK_LOGV_ERR("[pkev.h] unknown event: '%lu'.\n", evmgr);
+ if (pk_ev_mstr.mgrs[evmgr] == nullptr) {
+ PK_LOGV_ERR("[pkev.h] unknown manager: '%lu'.\n", evmgr);
exit(1);
}
- mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
- for (i = mgr->ev[evid].left_ev_cbs; i < mgr->ev[evid].right_ev_cbs; ++i) {
- if (found == false && mgr->ev[evid].ev_cbs[i].cb != nullptr) {
+ for (i = pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) {
+ if (found == false && pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb != nullptr) {
found = true;
cb_index = i;
continue;
}
if (found == false) continue;
- if (mgr->ev[evid].ev_cbs[i].cb == nullptr) {
- mgr->ev[evid].left_ev_cbs = i;
+ if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) {
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs = i;
break;
}
}
if (found == false) {
+ mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
if (pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO));
if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
@@ -1503,15 +1533,16 @@ pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *
pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
free(pk_ev_mstr.mgrs[evmgr]);
pk_ev_mstr.mgrs[evmgr] = mgr;
+ mgr = nullptr;
}
cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs++;
+ mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
if (cb_index == pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs) {
pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs++;
}
}
pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb;
pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data;
- mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
return cb_index;
}
@@ -2031,7 +2062,6 @@ pk_stn_double_e(double *d, char const *s, char **pEnd)
* CLOCK_PROCESS_CPUTIME_ID consistently elapsed thousands of nanoseconds,
* even with no work between sequential _start() and _stop() calls.
* Meanwhile, the same test with _MONOTONIC elapsed only tens of nanoseconds.
- * Consider replacing explicit usage with a define for more user control.
*/
/* struct pk_tmr */
@@ -2040,10 +2070,17 @@ struct pk_tmr {
struct timespec e; // end
};
-#define pk_tmr_start(tmr) { clock_gettime(CLOCK_MONOTONIC, &tmr.b); }
-#define pk_tmr_stop(tmr) { clock_gettime(CLOCK_MONOTONIC, &tmr.e); }
-#define pk_tmr_duration_double(tmr) ((1000.0 * tmr.e.tv_sec + 1e-6 * tmr.e.tv_nsec) - (1000.0 * tmr.b.tv_sec + 1e-6 * tmr.b.tv_nsec))
-#define pk_tmr_duration_nano(tmr) ((((uint64_t)tmr.e.tv_sec * (uint64_t)1000000000) + tmr.e.tv_nsec) - (((uint64_t)tmr.b.tv_sec * (uint64_t)1000000000) + (uint64_t)tmr.b.tv_nsec))
+#ifndef PK_TMR_CLOCK
+ #define PK_TMR_CLOCK CLOCK_MONOTONIC
+#endif
+
+#define pk_tmr_start(tmr) { clock_gettime(PK_TMR_CLOCK, &tmr.b); }
+#define pk_tmr_stop(tmr) { clock_gettime(PK_TMR_CLOCK, &tmr.e); }
+#define pk_tmr_duration_u64_nano(tmr) ((((unsigned long long int)tmr.e.tv_sec * 1000000000llu) + tmr.e.tv_nsec) - (((unsigned long long int)tmr.b.tv_sec * 1000000000llu) + (unsigned long long int)tmr.b.tv_nsec))
+#define pk_tmr_duration_dbl_nano(tmr) ((1e+9 * tmr.e.tv_sec + tmr.e.tv_nsec) - (1e+9 * tmr.b.tv_sec + tmr.b.tv_nsec))
+#define pk_tmr_duration_dbl_micro(tmr) ((1e+6 * tmr.e.tv_sec + 1e-3 * tmr.e.tv_nsec) - (1e+6 * tmr.b.tv_sec + 1e-3 * tmr.b.tv_nsec))
+#define pk_tmr_duration_dbl_mili(tmr) ((1e+3 * tmr.e.tv_sec + 1e-6 * tmr.e.tv_nsec) - (1e+3 * tmr.b.tv_sec + 1e-6 * tmr.b.tv_nsec))
+#define pk_tmr_duration_dbl_scnd(tmr) ((tmr.e.tv_sec + 1e-9 * tmr.e.tv_nsec) - (tmr.b.tv_sec + 1e-9 * tmr.b.tv_nsec))
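+
+/* Example usage (a sketch; override `PK_TMR_CLOCK` before inclusion if desired):
+*
+*     #define PK_TMR_CLOCK CLOCK_PROCESS_CPUTIME_ID
+*     #include "pk.h"
+*
+*     struct pk_tmr tmr;
+*     pk_tmr_start(tmr);
+*     // ... work to measure ...
+*     pk_tmr_stop(tmr);
+*     printf("%.3f ms\n", pk_tmr_duration_dbl_mili(tmr));
+*/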
#endif /* PK_PKTMR_H */
#endif /* PK_SINGLE_HEADER_FILE_H */