#ifndef PK_EV_H
#define PK_EV_H

#include <stdint.h>
#include <stdbool.h>

typedef uint64_t pk_ev_mgr_id_T;
typedef uint64_t pk_ev_id_T;

// note: pk_ev_init() is NOT thread-safe
void pk_ev_init(void);
// note: pk_ev_teardown() is NOT thread-safe
void pk_ev_teardown(void);

pk_ev_mgr_id_T pk_ev_create_mgr(void);
void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr);

typedef void (pk_ev_cb)(void);

pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr);
bool pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb *cb);
void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid);

#endif /* PK_EV_H */
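/*
 * Usage sketch (illustrative only): pkev follows the single-header
 * pattern, so exactly one translation unit defines PK_IMPL_EV before
 * including this file to pull in the implementation below; every other
 * file includes it plainly. The callback name on_tick is a placeholder
 * invented for this example.
 *
 *     #define PK_IMPL_EV
 *     #include "pkev.h"
 *
 *     static void on_tick(void) { }
 *
 *     pk_ev_init();
 *     pk_ev_mgr_id_T mgr  = pk_ev_create_mgr();
 *     pk_ev_id_T     tick = pk_ev_register_ev(mgr);
 *     pk_ev_register_cb(mgr, tick, on_tick);
 *     pk_ev_emit(mgr, tick);   // invokes on_tick once
 *     pk_ev_destroy_mgr(mgr);
 *     pk_ev_teardown();
 */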
#ifdef PK_IMPL_EV

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdatomic.h>
#include <threads.h>
#include <assert.h>

#ifndef PK_EV_INIT_MGR_COUNT
# define PK_EV_INIT_MGR_COUNT 1
#endif
#ifndef PK_EV_INIT_EV_COUNT
# define PK_EV_INIT_EV_COUNT 16
#endif
#ifndef PK_EV_INIT_CB_COUNT
# define PK_EV_INIT_CB_COUNT 8
#endif
#ifndef PK_EV_GROW_RATIO
# define PK_EV_GROW_RATIO 1.5
#endif

struct pk_ev {
    pk_ev_cb **cb;              /* callback slots for this event */
    atomic_uint_fast8_t n_cb;   /* callbacks registered */
};

struct pk_ev_mgr {
    struct pk_ev *ev;           /* event table */
    atomic_uint_fast8_t n_ev;   /* events registered */
    atomic_uint_fast8_t rn_ev;  /* event slots reserved */
    atomic_uint_fast8_t rn_cb;  /* callback slots reserved per event */
};

struct pk_ev_mstr {
    atomic_uint_fast64_t flg_mgrs;  /* bitmask of live mgr slots */
    atomic_uint_fast64_t rn_mgrs;   /* mgr slots reserved */
    struct pk_ev_mgr **mgrs;
    mtx_t *mtxs;                    /* one lock per mgr slot */
};

struct pk_ev_mstr pk_ev_mstr;

inline void pk_ev_init(void)
{
    int i;
    pk_ev_mstr.mgrs = (struct pk_ev_mgr **)malloc(sizeof(void *) * PK_EV_INIT_MGR_COUNT);
    pk_ev_mstr.mtxs = (mtx_t *)malloc(sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT);
    memset(pk_ev_mstr.mgrs, 0, sizeof(void *) * PK_EV_INIT_MGR_COUNT);
    memset(pk_ev_mstr.mtxs, 0, sizeof(mtx_t) * PK_EV_INIT_MGR_COUNT);
    for (i = 0; i < PK_EV_INIT_MGR_COUNT; ++i) {
        mtx_init(&pk_ev_mstr.mtxs[i], mtx_plain);
    }
    atomic_store(&pk_ev_mstr.flg_mgrs, 0);
    atomic_store(&pk_ev_mstr.rn_mgrs, PK_EV_INIT_MGR_COUNT);
}

inline void pk_ev_teardown(void)
{
    uint64_t i;
    uint64_t flg = atomic_load(&pk_ev_mstr.flg_mgrs);
    for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) {
        if (flg & (UINT64_C(1) << i)) {
            mtx_lock(&pk_ev_mstr.mtxs[i]);
            free(pk_ev_mstr.mgrs[i]);
            pk_ev_mstr.mgrs[i] = NULL;
            mtx_unlock(&pk_ev_mstr.mtxs[i]);
        }
        /* destroy every slot's mutex, live or not */
        mtx_destroy(&pk_ev_mstr.mtxs[i]);
    }
    free(pk_ev_mstr.mgrs);
    free(pk_ev_mstr.mtxs);
    pk_ev_mstr.mgrs = NULL;
    pk_ev_mstr.mtxs = NULL;
}

static inline struct pk_ev_mgr *
pk_ev_inner_ev_mgr_create(uint64_t ev_count, uint64_t cb_count)
{
    uint64_t i;
    struct pk_ev *ev;
    /* one allocation: the mgr header, then ev_count pk_ev slots, then
     * ev_count contiguous callback arrays of cb_count pointers each */
    size_t sz = sizeof(struct pk_ev_mgr)
              + (sizeof(struct pk_ev) * ev_count)
              + (sizeof(pk_ev_cb *) * ev_count * cb_count);
    size_t sz_ev = sizeof(pk_ev_cb *) * cb_count;
    size_t sz_evs = sizeof(struct pk_ev) * ev_count;
    struct pk_ev_mgr *mgr = (struct pk_ev_mgr *)malloc(sz);
    if (mgr == NULL)
        goto early_exit;
    memset(mgr, 0, sz);
    mgr->ev = (struct pk_ev *)((char *)mgr + sizeof(struct pk_ev_mgr));
    atomic_init(&mgr->rn_ev, ev_count);
    atomic_init(&mgr->rn_cb, cb_count);
    atomic_init(&mgr->n_ev, 0);
    for (i = 0; i < ev_count; ++i) {
        ev = &mgr->ev[i];
        atomic_init(&ev->n_cb, 0);
        ev->cb = (pk_ev_cb **)((char *)mgr + sizeof(struct pk_ev_mgr)
                               + sz_evs + (sz_ev * i));
    }
early_exit:
    return mgr;
}

static inline void
pk_ev_inner_ev_mgr_clone(struct pk_ev_mgr *old, struct pk_ev_mgr *mgr)
{
    uint_fast8_t i;
    uint_fast8_t n_ev = atomic_load(&old->n_ev);
    struct pk_ev *ev_old;
    struct pk_ev *ev;

    atomic_store(&mgr->n_ev, n_ev);
    for (i = 0; i < n_ev; ++i) {
        ev_old = &old->ev[i];
        ev = &mgr->ev[i];
        memcpy(ev->cb, ev_old->cb, sizeof(pk_ev_cb *) * atomic_load(&ev_old->n_cb));
        atomic_store(&ev->n_cb, atomic_load(&ev_old->n_cb));
    }
}

inline pk_ev_mgr_id_T pk_ev_create_mgr(void)
{
    uint64_t i;
    uint_fast64_t flg;
    uint_fast64_t flg_new;
    pk_ev_mgr_id_T id;
    struct pk_ev_mgr *mgr = pk_ev_inner_ev_mgr_create(PK_EV_INIT_EV_COUNT,
                                                      PK_EV_INIT_CB_COUNT);
    if (mgr == NULL)
        return (pk_ev_mgr_id_T)-1;

start:
    flg = atomic_load(&pk_ev_mstr.flg_mgrs);
    while (1) {
        flg_new = flg;
        /* find the first free slot in the allocation bitmask */
        for (i = 0; i < atomic_load(&pk_ev_mstr.rn_mgrs); ++i) {
            if ((flg & (UINT64_C(1) << i)) == 0)
                break;
        }
        if (i == atomic_load(&pk_ev_mstr.rn_mgrs)) {
            goto recreate;
        }
        id = i;
        flg_new |= (UINT64_C(1) << i);
        if (atomic_compare_exchange_strong(&pk_ev_mstr.flg_mgrs, &flg, flg_new))
            break;
        thrd_yield();
    }
    pk_ev_mstr.mgrs[id] = mgr;
    return id;

recreate:
    // TODO grow the mgr table; running out of space is currently fatal
    assert(1 == 0 && "[pkev.h] Out of mgr space.");
    exit(1);
    goto start;
}

inline void pk_ev_destroy_mgr(pk_ev_mgr_id_T evmgr)
{
    assert(evmgr < 64);
    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    free(pk_ev_mstr.mgrs[evmgr]);
    pk_ev_mstr.mgrs[evmgr] = NULL;
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
    /* clear the slot's bit so pk_ev_create_mgr() can reuse it */
    atomic_fetch_and(&pk_ev_mstr.flg_mgrs, ~(UINT64_C(1) << evmgr));
}

inline pk_ev_id_T pk_ev_register_ev(pk_ev_mgr_id_T evmgr)
{
    assert(evmgr < 64);
    pk_ev_id_T id;
    struct pk_ev_mgr *mgr;

    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    if (pk_ev_mstr.mgrs[evmgr]->n_ev == pk_ev_mstr.mgrs[evmgr]->rn_ev) {
        /* event table is full: allocate a larger manager and copy over */
        mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev * PK_EV_GROW_RATIO,
                                        pk_ev_mstr.mgrs[evmgr]->rn_cb);
        if (mgr == NULL) {
            mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
            return (pk_ev_id_T)-1;
        }
        pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
        free(pk_ev_mstr.mgrs[evmgr]);
        pk_ev_mstr.mgrs[evmgr] = mgr;
    }
    id = pk_ev_mstr.mgrs[evmgr]->n_ev++;
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
    return id;
}

inline bool pk_ev_register_cb(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid, pk_ev_cb *cb)
{
    assert(evmgr < 64);
    struct pk_ev_mgr *mgr;

    mtx_lock(&pk_ev_mstr.mtxs[evmgr]);
    if (pk_ev_mstr.mgrs[evmgr]->ev[evid].n_cb == pk_ev_mstr.mgrs[evmgr]->rn_cb) {
        /* callback table is full: allocate a larger manager and copy over */
        mgr = pk_ev_inner_ev_mgr_create(pk_ev_mstr.mgrs[evmgr]->rn_ev,
                                        pk_ev_mstr.mgrs[evmgr]->rn_cb * PK_EV_GROW_RATIO);
        if (mgr == NULL) {
            mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
            return false;
        }
        pk_ev_inner_ev_mgr_clone(pk_ev_mstr.mgrs[evmgr], mgr);
        free(pk_ev_mstr.mgrs[evmgr]);
        pk_ev_mstr.mgrs[evmgr] = mgr;
    }
    pk_ev_mstr.mgrs[evmgr]->ev[evid].cb[pk_ev_mstr.mgrs[evmgr]->ev[evid].n_cb++] = cb;
    mtx_unlock(&pk_ev_mstr.mtxs[evmgr]);
    return true;
}

inline void pk_ev_emit(pk_ev_mgr_id_T evmgr, pk_ev_id_T evid)
{
    assert(evmgr < 64);
    uint8_t i;
    // note: emit takes no lock; callers must not race it against
    // registration (which may reallocate the mgr) or destruction
    for (i = 0; i < pk_ev_mstr.mgrs[evmgr]->ev[evid].n_cb; ++i) {
        (*pk_ev_mstr.mgrs[evmgr]->ev[evid].cb[i])();
    }
}

#endif /* PK_IMPL_EV */
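/* A minimal smoke-test sketch, assuming the guard name PK_EV_TEST_MAIN
 * (hypothetical, not part of the original API): compile this file alone
 * with -DPK_IMPL_EV -DPK_EV_TEST_MAIN to exercise init, registration,
 * emission, and teardown once. */
#ifdef PK_EV_TEST_MAIN
#include <stdio.h>

static int pk_ev_test_hits = 0;

static void pk_ev_test_cb(void)
{
    ++pk_ev_test_hits;
}

int main(void)
{
    pk_ev_init();
    pk_ev_mgr_id_T mgr = pk_ev_create_mgr();
    pk_ev_id_T ev = pk_ev_register_ev(mgr);
    pk_ev_register_cb(mgr, ev, pk_ev_test_cb);
    pk_ev_emit(mgr, ev);
    printf("callback ran %d time(s)\n", pk_ev_test_hits); /* expect 1 */
    pk_ev_destroy_mgr(mgr);
    pk_ev_teardown();
    return 0;
}
#endif /* PK_EV_TEST_MAIN */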