diff options
| author | Jonathan Bradley <jcb@pikum.xyz> | 2024-11-05 20:23:38 -0500 |
|---|---|---|
| committer | Jonathan Bradley <jcb@pikum.xyz> | 2024-11-05 20:23:38 -0500 |
| commit | fce5a4841f725cecaae5925b0e63144c24e5dc81 (patch) | |
| tree | 1f82a7edf773f92e428d6e83d091d15addbd2850 /pkev.h | |
| parent | 06677d6aa1d477253e65015101d1185e74ee8054 (diff) | |
pkev: data at every level
Diffstat (limited to 'pkev.h')
| -rw-r--r-- | pkev.h | 51 |
1 file changed, 32 insertions, 19 deletions
@@ -14,11 +14,11 @@ void pk_ev_teardown(); const pk_ev_mgr_id_T pk_ev_create_mgr(); void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr); -typedef void (pk_ev_cb)(void *); +typedef void (pk_ev_cb_fn)(void *user_event_data, void *user_cb_data, void *user_ev_data); -const pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr); -bool pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb *cb); -void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_data); +const pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data); +bool pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data); +void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data); #endif /* PK_EV_H */ @@ -48,9 +48,15 @@ void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_data); # define PK_EV_GROW_RATIO 1.5 #endif +struct pk_ev_cb { + pk_ev_cb_fn *cb; + void *user_cb_data; +}; + struct pk_ev { - pk_ev_cb **cb; - atomic_uint_fast8_t n_cb; + struct pk_ev_cb *ev_cbs; + void *user_ev_data; + atomic_uint_fast8_t n_ev_cbs; }; struct pk_ev_mgr { @@ -107,8 +113,8 @@ pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count) { int i; struct pk_ev *ev; - size_t sz = sizeof(struct pk_ev_mgr) + ((sizeof(struct pk_ev) * ev_count)) + (sizeof (void *) * ev_count * cb_count); - size_t sz_ev = (sizeof(pk_ev_cb*) * cb_count); + size_t sz = sizeof(struct pk_ev_mgr) + ((sizeof(struct pk_ev) * ev_count)) + (sizeof (struct pk_ev_cb) * ev_count * cb_count); + size_t sz_ev = (sizeof(struct pk_ev_cb) * cb_count); size_t sz_evs = sizeof(struct pk_ev) * ev_count; struct pk_ev_mgr *mgr = (struct pk_ev_mgr*)malloc(sz); @@ -121,8 +127,8 @@ pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count) atomic_init(&mgr->n_ev, 0); for (i = 0; i < mgr->rn_ev; ++i) { ev = &mgr->ev[i]; - atomic_init(&ev->n_cb, 0); - ev->cb = (pk_ev_cb**)(((char *)mgr) + sizeof(struct pk_ev_mgr) + sz_evs + (sz_ev * i)); + atomic_init(&ev->n_ev_cbs, 0); + 
ev->ev_cbs = (struct pk_ev_cb*)(((char *)mgr) + sizeof(struct pk_ev_mgr) + sz_evs + (sz_ev * i)); } early_exit: @@ -139,8 +145,8 @@ pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr) for (i = 0; i < old->n_ev; ++i) { ev_old = &old->ev[i]; ev = &mgr->ev[i]; - memcpy(ev->cb, ev_old->cb, sizeof(pk_ev_cb*) * atomic_load(&ev_old->n_cb)); - atomic_store(&ev->n_cb, atomic_load(&ev_old->n_cb)); + memcpy(ev->ev_cbs, ev_old->ev_cbs, sizeof(struct pk_ev_cb) * atomic_load(&ev_old->n_ev_cbs)); + atomic_store(&ev->n_ev_cbs, atomic_load(&ev_old->n_ev_cbs)); } } @@ -188,7 +194,7 @@ pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr) } inline const pk_ev_id_T -pk_ev_register_ev(pk_ev_mgr_id_T evmgr) +pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data) { assert(evmgr < 64); pk_ev_id_T id; @@ -201,34 +207,41 @@ pk_ev_register_ev(pk_ev_mgr_id_T evmgr) pk_ev_mstr.mgrs[evmgr] = mgr; } id = pk_ev_mstr.mgrs[evmgr]->n_ev++; + pk_ev_mstr.mgrs[evmgr]->ev[id].user_ev_data = user_ev_data; mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); return id; } inline bool -pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb *cb) +pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data) { assert(evmgr < 64); struct pk_ev_mgr *mgr; + uint8_t cb_index; mtx_lock(&pk_ev_mstr.mtxs[evmgr]); - if (pk_ev_mstr.mgrs[evmgr]->ev[evid].n_cb == pk_ev_mstr.mgrs[evmgr]->rn_cb) { + if (pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) { mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev, pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO); pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr); free(pk_ev_mstr.mgrs[evmgr]); pk_ev_mstr.mgrs[evmgr] = mgr; } - pk_ev_mstr.mgrs[evmgr]->ev[evid].cb[pk_ev_mstr.mgrs[evmgr]->ev[evid].n_cb++] = cb; + cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs++; + pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb; + pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data; 
mtx_unlock(&pk_ev_mstr.mtxs[evmgr]); return true; } inline void -pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_data) +pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data) { assert(evmgr < 64); uint8_t i; - for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].n_cb; ++i) { - (*pk_ev_mstr.mgrs[evmgr]->ev[evid].cb[i])(user_data); + for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].n_ev_cbs; ++i) { + (*pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb)( + pk_ev_mstr.mgrs[evmgr]->ev[evid].user_ev_data, + pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].user_cb_data, + user_emit_data); } } |
