summaryrefslogtreecommitdiff
path: root/pkev.h
diff options
context:
space:
mode:
authorJonathan Bradley <jcb@pikum.xyz>2025-03-14 15:42:02 -0400
committerJonathan Bradley <jcb@pikum.xyz>2025-03-14 15:52:58 -0400
commit66cd4bdd9b5a284060f825b43bbfab5b21194975 (patch)
treeb20400b444c1d631bb052562251a738d6bd02b73 /pkev.h
parent9f498969d9001eaf5eacdebd10ae5248e1e9e289 (diff)
pkev: correctly clone managers on recreation
Diffstat (limited to 'pkev.h')
-rw-r--r--  pkev.h  85
1 file changed, 48 insertions(+), 37 deletions(-)
diff --git a/pkev.h b/pkev.h
index c67b357..a3a8d34 100644
--- a/pkev.h
+++ b/pkev.h
@@ -7,6 +7,8 @@ typedef uint64_t pk_ev_mgr_id_T;
typedef uint64_t pk_ev_id_T;
typedef uint64_t pk_ev_cb_id_T;
+// TODO re-think threading
+
// note: pk_ev_init() is NOT thread-safe
void pk_ev_init();
// note: pk_ev_teardown() is NOT thread-safe
@@ -116,6 +118,19 @@ pk_ev_teardown()
pk_ev_mstr.mtxs = NULL;
}
+size_t
+pk_ev_inner_calc_sz(uint64_t ev_count, uint64_t cb_count, size_t *sz_ev_list, size_t *sz_ev_cb_list)
+{
+ // base sizes
+ if (sz_ev_list != nullptr) *sz_ev_list = sizeof(struct pk_ev) * ev_count;
+ if (sz_ev_cb_list != nullptr) *sz_ev_cb_list = sizeof(struct pk_ev_cb) * cb_count;
+
+ size_t ret = sizeof(struct pk_ev_mgr);
+ if (sz_ev_list != nullptr) ret += *sz_ev_list;
+ if (sz_ev_cb_list != nullptr) ret += *sz_ev_cb_list * ev_count;
+ return ret;
+}
+
static struct pk_ev_mgr*
pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
{
@@ -123,12 +138,10 @@ pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
assert(cb_count < 0x100);
uint64_t i;
struct pk_ev *ev;
- // TODO refactor this.
- // Consider moving this to a function or set of functions.
- // Want to ensure anywhere we're doing this type of math is consistent.
- size_t sz = sizeof(struct pk_ev_mgr) + ((sizeof(struct pk_ev) * ev_count)) + (sizeof (struct pk_ev_cb) * ev_count * cb_count);
- size_t sz_ev = (sizeof(struct pk_ev_cb) * cb_count);
- size_t sz_evs = sizeof(struct pk_ev) * ev_count;
+ size_t sz_ev_list;
+ size_t sz_ev_cb_list;
+ size_t sz = pk_ev_inner_calc_sz(ev_count, cb_count, &sz_ev_list, &sz_ev_cb_list);
+ size_t sz_offset;
struct pk_ev_mgr *mgr = (struct pk_ev_mgr*)malloc(sz);
if (mgr == NULL) goto early_exit;
@@ -137,11 +150,14 @@ pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
atomic_init(&mgr->rn_ev, ev_count);
atomic_init(&mgr->rn_cb, cb_count);
atomic_init(&mgr->n_ev, 0);
- for (i = 0; i < mgr->rn_ev; ++i) {
+ for (i = 0; i < ev_count; ++i) {
ev = &mgr->ev[i];
atomic_init(&ev->left_ev_cbs, 0);
atomic_init(&ev->right_ev_cbs, 0);
- ev->ev_cbs = (struct pk_ev_cb*)(((char *)mgr) + sizeof(struct pk_ev_mgr) + sz_evs + (sz_ev * i));
+ sz_offset = sizeof(struct pk_ev_mgr);
+ sz_offset += sz_ev_list;
+ sz_offset += sz_ev_cb_list * i;
+ ev->ev_cbs = (struct pk_ev_cb*)(((char *)mgr) + sz_offset);
}
early_exit:
@@ -155,15 +171,13 @@ pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr)
struct pk_ev *ev_old;
struct pk_ev *ev;
atomic_store(&mgr->n_ev, atomic_load(&old->n_ev));
- atomic_store(&mgr->rn_ev, atomic_load(&old->rn_ev));
- atomic_store(&mgr->rn_cb, atomic_load(&old->rn_cb));
+ size_t old_sz_ev_cb_list;
for (i = 0; i < old->n_ev; ++i) {
ev_old = &old->ev[i];
ev = &mgr->ev[i];
+ pk_ev_inner_calc_sz(0, atomic_load(&ev_old->right_ev_cbs), nullptr, &old_sz_ev_cb_list);
ev->user_ev_data = ev_old->user_ev_data;
- // TODO store size in variable so it can be debuggable.
- // Also make sure this is even right.
- memcpy(ev->ev_cbs, ev_old->ev_cbs, sizeof(struct pk_ev_cb) * atomic_load(&ev_old->right_ev_cbs));
+ memcpy(ev->ev_cbs, ev_old->ev_cbs, old_sz_ev_cb_list);
atomic_store(&ev->left_ev_cbs, atomic_load(&ev_old->left_ev_cbs));
atomic_store(&ev->right_ev_cbs, atomic_load(&ev_old->right_ev_cbs));
}
@@ -217,7 +231,7 @@ pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data)
assert(evmgr < 64);
uint64_t new_size;
pk_ev_id_T id;
- struct pk_ev_mgr *mgr;
+ struct pk_ev_mgr *mgr = nullptr;
mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
if (pk_ev_mstr.mgrs[evmgr]->n_ev == pk_ev_mstr.mgrs[evmgr]->rn_ev) {
new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_ev * PK_EV_GROW_RATIO));
@@ -232,8 +246,8 @@ pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data)
pk_ev_mstr.mgrs[evmgr] = mgr;
}
id = pk_ev_mstr.mgrs[evmgr]->n_ev++;
- pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data;
mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data;
return id;
}
@@ -243,50 +257,47 @@ pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *
assert(evmgr < 64);
bool found = false;
uint64_t new_size, i;
- struct pk_ev_mgr *mgr;
- struct pk_ev *ev;
+ struct pk_ev_mgr *mgr = nullptr;
pk_ev_cb_id_T cb_index;
-
- mgr = pk_ev_mstr.mgrs[evmgr];
- if (mgr == nullptr) {
- PK_LOGV_ERR("[pkev.h] unknown event: '%lu'.\n", evmgr);
+ if (pk_ev_mstr.mgrs[evmgr] == nullptr) {
+ PK_LOGV_ERR("[pkev.h] unknown manager: '%lu'.\n", evmgr);
exit(1);
}
- mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
- ev = &mgr->ev[evid];
- for (i = ev->left_ev_cbs; i < ev->right_ev_cbs; ++i) {
- if (found == false && ev->ev_cbs[i].cb != nullptr) {
+ for (i = pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) {
+ if (found == false && pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb != nullptr) {
found = true;
cb_index = i;
continue;
}
if (found == false) continue;
- if (ev->ev_cbs[i].cb == nullptr) {
- ev->left_ev_cbs = i;
+ if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) {
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs = i;
break;
}
}
if (found == false) {
- if (ev->right_ev_cbs > mgr->rn_cb) {
- new_size = PK_MAX(2, PK_MIN(255, mgr->rn_cb * PK_EV_GROW_RATIO));
- if (new_size == mgr->rn_cb) {
+ mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
+ if (pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
+ new_size = PK_MAX(2, PK_MIN(255, pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO));
+ if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
PK_LOG_ERR("[pkev.h] need more room, but failed to grow cb count.\n");
mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
exit(1);
}
- mgr = pk_ev_inner_ev_mgr_create(mgr->rn_ev, new_size);
+ mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev, new_size);
pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
free(pk_ev_mstr.mgrs[evmgr]);
pk_ev_mstr.mgrs[evmgr] = mgr;
+ mgr = nullptr;
}
- cb_index = ev->right_ev_cbs++;
- if (cb_index == ev->left_ev_cbs) {
- ev->left_ev_cbs++;
+ cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs++;
+ mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ if (cb_index == pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs) {
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs++;
}
}
- ev->ev_cbs[cb_index].cb = cb;
- ev->ev_cbs[cb_index].user_cb_data = user_cb_data;
- mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb;
+ pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data;
return cb_index;
}