#ifndef PK_EV_H
#define PK_EV_H

#include "./pkmem.h" /* deleteme */
#include <stdint.h>

typedef uint64_t pk_ev_mgr_id_T;
typedef uint64_t pk_ev_id_T;
typedef uint64_t pk_ev_cb_id_T;

static const pk_ev_mgr_id_T pk_ev_mgr_id_T_MAX = 0xFFFFFFFFFFFFFFFF;
static const pk_ev_id_T pk_ev_id_T_MAX = 0xFFFFFFFFFFFFFFFF;
static const pk_ev_cb_id_T pk_ev_cb_id_T_MAX = 0xFFFFFFFFFFFFFFFF;

// TODO re-think threading

// note: pk_ev_init() is NOT thread-safe
void pk_ev_init(struct pk_membucket *bkt);
// note: pk_ev_teardown() is NOT thread-safe
void pk_ev_teardown();

pk_ev_mgr_id_T pk_ev_create_mgr();
void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr);

// callback signature: receives the event's user data, the callback's user data,
// and the per-emit data passed to pk_ev_emit()
typedef void (pk_ev_cb_fn)(void *user_ev_data, void *user_cb_data, void *user_emit_data);

pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data);
pk_ev_cb_id_T pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data);

void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data);

void pk_ev_unregister_ev(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid);
void pk_ev_unregister_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_id_T cbid);

#endif /* PK_EV_H */
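/*
 * Minimal usage sketch. `bucket_setup` and `on_tick` are hypothetical
 * placeholders (the bucket comes from pkmem.h, and the callback must match
 * pk_ev_cb_fn); everything else is this header's API:
 *
 *     struct pk_membucket *bkt = bucket_setup();
 *     pk_ev_init(bkt);
 *
 *     pk_ev_mgr_id_T mgr = pk_ev_create_mgr();
 *     pk_ev_id_T ev = pk_ev_register_ev(mgr, NULL);
 *     pk_ev_cb_id_T cb = pk_ev_register_cb(mgr, ev, &on_tick, NULL);
 *
 *     pk_ev_emit(mgr, ev, NULL);   // synchronously calls on_tick(...)
 *
 *     pk_ev_unregister_cb(mgr, ev, cb);
 *     pk_ev_unregister_ev(mgr, ev);
 *     pk_ev_destroy_mgr(mgr);
 *     pk_ev_teardown();
 */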
#ifdef PK_IMPL_EV

#include "pkmacros.h" /* deleteme */
#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>
#include <threads.h>

#ifndef PK_EV_INIT_MGR_COUNT
# define PK_EV_INIT_MGR_COUNT 1
#endif
#ifndef PK_EV_INIT_EV_COUNT
# define PK_EV_INIT_EV_COUNT 16
#endif
#ifndef PK_EV_INIT_CB_COUNT
# define PK_EV_INIT_CB_COUNT 8
#endif
#ifndef PK_EV_GROW_RATIO
# define PK_EV_GROW_RATIO 1.5
#endif

// hard limits
// PK_EV_MAX_EV_COUNT would require a refactor for keeping track of used slots
#define PK_EV_MAX_EV_COUNT 64
// PK_EV_MAX_CB_COUNT could be increased as desired
#define PK_EV_MAX_CB_COUNT 255

#ifndef PK_EV_MEM_ALLOC
# define PK_EV_MEM_ALLOC(sz, alignment, bkt) pk_new(sz, alignment, bkt)
#endif
#ifndef PK_EV_MEM_FREE
# define PK_EV_MEM_FREE(ptr, sz, bkt) pk_delete(ptr, sz, bkt)
#endif

struct pk_ev_cb {
    pk_ev_cb_fn *cb;
    void *user_cb_data;
};

struct pk_ev {
    struct pk_ev_cb *ev_cbs;
    void *user_ev_data;
    atomic_uint_fast64_t left_ev_cbs;
    atomic_uint_fast64_t right_ev_cbs;
};

struct pk_ev_mgr {
    struct pk_ev *ev;
    atomic_uint_fast64_t left_evs;
    atomic_uint_fast64_t right_evs;
    atomic_uint_fast64_t unused_evs;
    // reserved length of `pk_ev`s on this struct
    atomic_uint_fast64_t rn_ev;
    // on any given `pk_ev`, the number of callbacks reserved
    atomic_uint_fast64_t rn_cb;
};

struct pk_ev_mstr {
    atomic_uint_fast64_t flg_mgrs;
    atomic_uint_fast64_t rn_mgrs;
    struct pk_ev_mgr **mgrs;
    mtx_t *mtxs;
    struct pk_membucket *bkt;
};

struct pk_ev_mstr pk_ev_mstr;

void pk_ev_init(struct pk_membucket *bkt) {
    int i;
    pk_ev_mstr.bkt = bkt;
    pk_ev_mstr.mgrs = (struct pk_ev_mgr **)PK_EV_MEM_ALLOC(sizeof(void *) * PK_EV_INIT_MGR_COUNT, alignof(void *), bkt);
    pk_ev_mstr.mtxs = (mtx_t *)PK_EV_MEM_ALLOC(sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT, alignof(mtx_t), bkt);
    memset(pk_ev_mstr.mgrs, 0, sizeof(void *) * PK_EV_INIT_MGR_COUNT);
    memset(pk_ev_mstr.mtxs, 0, sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT);
    for (i = 0; i < PK_EV_INIT_MGR_COUNT; ++i) {
        mtx_init(&pk_ev_mstr.mtxs[i], mtx_plain);
    }
    atomic_store(&pk_ev_mstr.flg_mgrs, 0lu);
    atomic_store(&pk_ev_mstr.rn_mgrs, PK_EV_INIT_MGR_COUNT);
}

size_t pk_ev_inner_calc_sz(uint64_t ev_count, uint64_t cb_count, size_t *sz_ev_list, size_t *sz_ev_cb_list) {
    // base sizes
    size_t l_sz_ev_list = sizeof(struct pk_ev) * ev_count;
    size_t l_sz_ev_cb_list = sizeof(struct pk_ev_cb) * cb_count;
    l_sz_ev_list += ((size_t)64 - alignof(struct pk_ev)) % (size_t)64;
    l_sz_ev_cb_list += ((size_t)64 - alignof(struct pk_ev_cb)) % (size_t)64;
    if (sz_ev_list != nullptr) *sz_ev_list = l_sz_ev_list;
    if (sz_ev_cb_list != nullptr) *sz_ev_cb_list = l_sz_ev_cb_list;
    size_t ret = sizeof(struct pk_ev_mgr);
    ret += l_sz_ev_list;
    ret += l_sz_ev_cb_list * ev_count;
    return ret;
}

void pk_ev_teardown() {
    long unsigned int i;
    for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) {
        if ((atomic_load(&pk_ev_mstr.flg_mgrs) & (1lu << i)) == 0lu) continue;
        mtx_lock(&pk_ev_mstr.mtxs[i]);
        size_t sz = pk_ev_inner_calc_sz(
            atomic_load(&pk_ev_mstr.mgrs[i]->rn_ev),
            atomic_load(&pk_ev_mstr.mgrs[i]->rn_cb),
            NULL, NULL
        );
        PK_EV_MEM_FREE(pk_ev_mstr.mgrs[i], sz, pk_ev_mstr.bkt);
        pk_ev_mstr.mgrs[i] = NULL;
        mtx_unlock(&pk_ev_mstr.mtxs[i]);
        mtx_destroy(&pk_ev_mstr.mtxs[i]);
    }
    PK_EV_MEM_FREE(pk_ev_mstr.mgrs, sizeof(void *) * atomic_load(&pk_ev_mstr.rn_mgrs), pk_ev_mstr.bkt);
    PK_EV_MEM_FREE(pk_ev_mstr.mtxs, sizeof(mtx_t) * atomic_load(&pk_ev_mstr.rn_mgrs), pk_ev_mstr.bkt);
    pk_ev_mstr.mgrs = NULL;
    pk_ev_mstr.mtxs = NULL;
}
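/*
 * Illustrative layout of the single allocation sized by pk_ev_inner_calc_sz()
 * and carved up by pk_ev_inner_ev_mgr_create() below (a sketch, not exact byte
 * counts; real offsets depend on the struct sizes and the padding computed above):
 *
 *   [ struct pk_ev_mgr ][ pad ][ struct pk_ev x ev_count ][ pad ]
 *   [ struct pk_ev_cb x cb_count ]  <- ev[0].ev_cbs
 *   [ struct pk_ev_cb x cb_count ]  <- ev[1].ev_cbs
 *   ...
 *
 * Growing either count therefore means allocating a whole new block, cloning
 * the old one into it, and freeing the old block.
 */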
static struct pk_ev_mgr* pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count) {
    assert(ev_count < 0x100);
    assert(cb_count < 0x100);
    uint64_t i;
    char *ptr;
    struct pk_ev *ev;
    size_t sz_ev_list;
    size_t sz_ev_cb_list;
    size_t sz_offset;
    size_t sz = pk_ev_inner_calc_sz(ev_count, cb_count, &sz_ev_list, &sz_ev_cb_list);
    struct pk_ev_mgr *mgr = (struct pk_ev_mgr *)PK_EV_MEM_ALLOC(sz, alignof(struct pk_ev_mgr), pk_ev_mstr.bkt);
    if (mgr == NULL) goto early_exit;

    ptr = ((char *)mgr) + sizeof(struct pk_ev_mgr);
    sz_offset = (size_t)ptr % alignof(struct pk_ev);
    ptr += ((size_t)64 - sz_offset) % (size_t)64;
    mgr->ev = (struct pk_ev *)ptr;

    atomic_init(&mgr->rn_ev, ev_count);
    atomic_init(&mgr->rn_cb, cb_count);
    atomic_init(&mgr->left_evs, 0);
    atomic_init(&mgr->right_evs, 0);
    atomic_init(&mgr->unused_evs, 0xFFFFFFFFFFFFFFFF);

    // find mem-aligned beginning of cb array
    ptr += sz_ev_list;
    sz_offset = (size_t)ptr % alignof(struct pk_ev_cb);
    ptr += ((size_t)64 - sz_offset) % (size_t)64;

    for (i = 0; i < ev_count; ++i) {
        ev = &mgr->ev[i];
        atomic_init(&ev->left_ev_cbs, 0);
        atomic_init(&ev->right_ev_cbs, 0);
        sz_offset = sz_ev_cb_list * i;
        ev->ev_cbs = (struct pk_ev_cb *)(ptr + sz_offset);
    }

    /* debug
    fprintf(stdout, "[%s] mgr: sz: %lu, ev_count: %lu, cb_count: %lu \n", __FILE__, sz, ev_count, cb_count);
    fprintf(stdout, "\t%p - ptr\n", (void*)mgr);
    fprintf(stdout, "\t%p - evs (+%lu)\n", (void*)mgr->ev, (char*)mgr->ev - (char*)mgr);
    fprintf(stdout, "\t%p - cbs (+%lu)\n", (void*)mgr->ev[0].ev_cbs, (char*)mgr->ev[0].ev_cbs - (char*)mgr);
    */

early_exit:
    return mgr;
}

static void pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr) {
    uint64_t i, ii;
    uint64_t u, uu;
    struct pk_ev *ev_old;
    struct pk_ev *ev;
    ii = atomic_load(&old->right_evs);
    atomic_store(&mgr->left_evs, atomic_load(&old->left_evs));
    atomic_store(&mgr->right_evs, ii);
    atomic_store(&mgr->unused_evs, atomic_load(&old->unused_evs));
    for (i = 0; i < ii; ++i) {
        ev_old = &old->ev[i];
        ev = &mgr->ev[i];
        ev->user_ev_data = ev_old->user_ev_data;
        uu = atomic_load(&ev_old->right_ev_cbs);
        // copy only the used cb slots [0, right_ev_cbs); `<=` would read one
        // past the reserved range when the old cb list is full
        for (u = 0; u < uu; ++u) {
            ev->ev_cbs[u].cb = ev_old->ev_cbs[u].cb;
            ev->ev_cbs[u].user_cb_data = ev_old->ev_cbs[u].user_cb_data;
        }
        atomic_store(&ev->left_ev_cbs, atomic_load(&ev_old->left_ev_cbs));
        atomic_store(&ev->right_ev_cbs, atomic_load(&ev_old->right_ev_cbs));
    }
}

pk_ev_mgr_id_T pk_ev_create_mgr() {
    uint64_t i;
    pk_ev_mgr_id_T flg;
    pk_ev_mgr_id_T flg_new;
    pk_ev_mgr_id_T id;
    struct pk_ev_mgr *mgr = pk_ev_inner_ev_mgr_create(PK_EV_INIT_EV_COUNT, PK_EV_INIT_CB_COUNT);
    if (mgr == NULL) return pk_ev_mgr_id_T_MAX;
start:
    flg = atomic_load(&pk_ev_mstr.flg_mgrs);
    while (1) {
        flg_new = flg;
        for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) {
            if ((flg & (1lu << i)) == 0) break;
        }
        if (i == atomic_load(&pk_ev_mstr.rn_mgrs)) {
            goto recreate;
        }
        id = i;
        flg_new |= (1lu << i);
        if (atomic_compare_exchange_weak(&pk_ev_mstr.flg_mgrs, &flg, flg_new)) break;
        thrd_yield();
    }
    pk_ev_mstr.mgrs[id] = mgr;
    return id;
recreate:
    // TODO recreate mgr, out of space
    assert(1 == 0 && "[pkev.h] Out of mgr space.");
    exit(1);
    goto start;
}

void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr) {
    pk_ev_mgr_id_T flg;
    pk_ev_mgr_id_T flg_new;
    assert(evmgr < pk_ev_mstr.rn_mgrs);
    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    size_t old_sz = pk_ev_inner_calc_sz(pk_ev_mstr.mgrs[evmgr]->rn_ev, pk_ev_mstr.mgrs[evmgr]->rn_cb, NULL, NULL);
    PK_EV_MEM_FREE(pk_ev_mstr.mgrs[evmgr], old_sz, pk_ev_mstr.bkt);
    pk_ev_mstr.mgrs[evmgr] = NULL;
    flg = atomic_load(&pk_ev_mstr.flg_mgrs);
    while (1) {
        flg_new = flg;
        flg_new &= ~(1lu << evmgr);
        if (atomic_compare_exchange_weak(&pk_ev_mstr.flg_mgrs, &flg, flg_new)) break;
        thrd_yield();
    }
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
}
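/*
 * Event-slot bookkeeping, as a worked example (assuming a fresh manager):
 * `unused_evs` is a bitmask with one bit per reserved slot (1 = free),
 * `left_evs` is the next candidate slot and `right_evs` the high-water mark.
 * After registering events 0..3: left == right == 4 and bits 0..3 are cleared.
 * After pk_ev_unregister_ev(mgr, 1): bit 1 is set again and left drops to 1,
 * so the next pk_ev_register_ev() reuses slot 1 and advances left to the next
 * free bit (4 in this example). One uint64_t bitmask per manager is also the
 * reason PK_EV_MAX_EV_COUNT is capped at 64.
 */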
pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr, void *user_ev_data) {
    assert(evmgr < 64);
    uint64_t new_size;
    uint64_t i, ii, flg;
    pk_ev_id_T id;
    struct pk_ev_mgr *mgr = nullptr;
    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    mgr = pk_ev_mstr.mgrs[evmgr];
    if (mgr->left_evs == mgr->right_evs && mgr->right_evs == mgr->rn_ev) {
        new_size = PK_MAX(2, PK_MIN(PK_EV_MAX_EV_COUNT, mgr->rn_ev * PK_EV_GROW_RATIO));
        if (new_size == mgr->rn_ev) {
            PK_LOG_ERR("[pkev.h] need more room, but failed to grow ev count.\n");
            mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
            exit(1);
        }
        mgr = pk_ev_inner_ev_mgr_create(new_size, pk_ev_mstr.mgrs[evmgr]->rn_cb);
        pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
        size_t old_sz = pk_ev_inner_calc_sz(pk_ev_mstr.mgrs[evmgr]->rn_ev, pk_ev_mstr.mgrs[evmgr]->rn_cb, NULL, NULL);
        PK_EV_MEM_FREE(pk_ev_mstr.mgrs[evmgr], old_sz, pk_ev_mstr.bkt);
        pk_ev_mstr.mgrs[evmgr] = mgr;
    }
    id = atomic_load(&mgr->left_evs);
    flg = atomic_load(&mgr->unused_evs);
    if (mgr->left_evs != mgr->right_evs) {
        i = atomic_load(&mgr->left_evs) + 1;
        ii = atomic_load(&mgr->rn_ev);
        for (; i <= ii; ++i) {
            if (flg & (1lu << i)) {
                break;
            }
        }
        atomic_store(&mgr->left_evs, i);
    } else {
        atomic_store(&mgr->left_evs, atomic_load(&mgr->left_evs) + 1);
        atomic_store(&mgr->right_evs, atomic_load(&mgr->right_evs) + 1);
    }
    atomic_store(&mgr->unused_evs, flg & ~(1lu << id));
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
    mgr->ev[id].user_ev_data = user_ev_data;
    return id;
}

pk_ev_cb_id_T pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_fn *cb, void *user_cb_data) {
    assert(evmgr < PK_EV_INIT_MGR_COUNT);
    bool found = false;
    uint64_t new_size, i;
    struct pk_ev_mgr *mgr = nullptr;
    pk_ev_cb_id_T cb_index;
    if (pk_ev_mstr.mgrs[evmgr] == nullptr) {
        PK_LOGV_ERR("[pkev.h] unknown manager: '%lu'.\n", evmgr);
        exit(1);
    }
    // try to reuse a previously unregistered (nullptr) cb slot in [left, right)
    for (i = pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) {
        if (found == false && pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) {
            found = true;
            cb_index = i;
            continue;
        }
        if (found == false) continue;
        // remember the next free slot for the following registration
        if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) {
            pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs = i;
            break;
        }
    }
    if (found == false) {
        mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
        if (pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
            size_t old_sz = pk_ev_inner_calc_sz(pk_ev_mstr.mgrs[evmgr]->rn_ev, pk_ev_mstr.mgrs[evmgr]->rn_cb, NULL, NULL);
            new_size = PK_MAX(2, PK_MIN(PK_EV_MAX_CB_COUNT, pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO));
            if (new_size == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
                PK_LOG_ERR("[pkev.h] need more room, but failed to grow cb count.\n");
                mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
                exit(1);
            }
            mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev, new_size);
            pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
            PK_EV_MEM_FREE(pk_ev_mstr.mgrs[evmgr], old_sz, pk_ev_mstr.bkt);
            pk_ev_mstr.mgrs[evmgr] = mgr;
            mgr = nullptr;
        }
        cb_index = pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs++;
        mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
        if (cb_index == pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs) {
            pk_ev_mstr.mgrs[evmgr]->ev[evid].left_ev_cbs++;
        }
    }
    pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].cb = cb;
    pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[cb_index].user_cb_data = user_cb_data;
    return cb_index;
}

void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, void *user_emit_data) {
    assert(evmgr < PK_EV_INIT_MGR_COUNT);
    uint8_t i;
    // check the manager before the loop condition dereferences it
    if (pk_ev_mstr.mgrs[evmgr] == nullptr) return;
    for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].right_ev_cbs; ++i) {
        if (pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb == nullptr) continue;
        (*pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].cb)(
            pk_ev_mstr.mgrs[evmgr]->ev[evid].user_ev_data,
            pk_ev_mstr.mgrs[evmgr]->ev[evid].ev_cbs[i].user_cb_data,
            user_emit_data);
    }
}

void pk_ev_unregister_ev(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid) {
    assert(evmgr < pk_ev_mstr.rn_mgrs);
    struct pk_ev_mgr *mgr = pk_ev_mstr.mgrs[evmgr];
    // check the manager before touching it
    if (mgr == nullptr) return;
    assert(evid < mgr->right_evs);
    mgr->ev[evid].user_ev_data = NULL;
    atomic_store(&mgr->ev[evid].left_ev_cbs, 0);
    atomic_store(&mgr->ev[evid].right_ev_cbs, 0);
    for (uint64_t u = 0; u < mgr->rn_cb; ++u) {
        mgr->ev[evid].ev_cbs[u].cb = NULL;
        mgr->ev[evid].ev_cbs[u].user_cb_data = NULL;
    }
    atomic_store(&mgr->unused_evs, atomic_load(&mgr->unused_evs) | (1lu << evid));
    if (evid < atomic_load(&mgr->left_evs)) {
        atomic_store(&mgr->left_evs, evid);
    }
}

void pk_ev_unregister_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb_id_T cbid) {
    struct pk_ev_mgr *mgr = pk_ev_mstr.mgrs[evmgr];
    if (mgr == nullptr) return;
    if (mgr->ev[evid].left_ev_cbs > cbid) {
        mgr->ev[evid].left_ev_cbs = cbid;
    }
    mgr->ev[evid].ev_cbs[cbid].cb = nullptr;
    mgr->ev[evid].ev_cbs[cbid].user_cb_data = nullptr;
}

#endif /* PK_IMPL_EV */
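/*
 * Single-header integration sketch (an assumption about intended use: exactly
 * one translation unit provides the implementation; the PK_EV_* knobs shown
 * are the #ifndef-guarded defaults from above and only take effect in that
 * implementation TU):
 *
 *     // in exactly one .c file:
 *     #define PK_EV_INIT_EV_COUNT 32   // optional overrides
 *     #define PK_EV_INIT_CB_COUNT 16
 *     // #define PK_EV_MEM_ALLOC(sz, alignment, bkt) my_alloc(sz, alignment, bkt)
 *     // #define PK_EV_MEM_FREE(ptr, sz, bkt)        my_free(ptr, sz, bkt)
 *     #define PK_IMPL_EV
 *     #include "pkev.h"
 *
 *     // in every other file that uses the API:
 *     #include "pkev.h"
 *
 * `my_alloc` / `my_free` are hypothetical stand-ins for a custom allocator.
 */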