#ifndef _RTE_RCU_QSBR_H_
#define _RTE_RCU_QSBR_H_

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ring.h>

extern int rte_rcu_log_type;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define __RTE_RCU_DP_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
		"%s(): " fmt "\n", __func__, ## args)
#else
#define __RTE_RCU_DP_LOG(level, fmt, args...)
#endif

#if defined(RTE_LIBRTE_RCU_DEBUG)
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do {\
	if (v->qsbr_cnt[thread_id].lock_cnt) \
		rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
			"%s(): " fmt "\n", __func__, ## args); \
} while (0)
#else
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
#endif

/* Registered thread IDs are stored as a bitmap of 64b element array.
 * A given thread ID needs to be converted to an index into the array and
 * a bit position within that array element.
 */
#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
#define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
	RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
		__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
	((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
#define __RTE_QSBR_THRID_INDEX_SHIFT 6
#define __RTE_QSBR_THRID_MASK 0x3f
#define RTE_QSBR_THRID_INVALID 0xffffffff
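/*
 * Worked example (illustrative, not part of the original header): with
 * 64 thread IDs per bitmap element, thread ID 70 maps to array element
 * 70 >> __RTE_QSBR_THRID_INDEX_SHIFT = 1 and bit position
 * 70 & __RTE_QSBR_THRID_MASK = 6. A sketch of the decomposition, where
 * 'v' is a hypothetical QS variable:
 *
 *	unsigned int thread_id = 70;
 *	unsigned int elem = thread_id >> __RTE_QSBR_THRID_INDEX_SHIFT;
 *	unsigned int bit = thread_id & __RTE_QSBR_THRID_MASK;
 *	uint64_t *reg = __RTE_QSBR_THRID_ARRAY_ELM(v, elem);
 *	bool is_registered = (*reg >> bit) & 1;
 */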
/* Worker thread counter */
struct rte_rcu_qsbr_cnt {
	uint64_t cnt;
	/**< Quiescent state counter. Value 0 indicates the thread is
	 *   offline. A 64b counter is used to avoid handling counter
	 *   overflow.
	 */
	uint32_t lock_cnt;
	/**< Lock counter. Used when RTE_LIBRTE_RCU_DEBUG is enabled. */
} __rte_cache_aligned;

#define __RTE_QSBR_CNT_THR_OFFLINE 0
#define __RTE_QSBR_CNT_INIT 1
#define __RTE_QSBR_CNT_MAX ((uint64_t)~0)
#define __RTE_QSBR_TOKEN_SIZE sizeof(uint64_t)

/* RTE Quiescent State variable structure.
 * This structure has two elements that vary in size based on the
 * 'max_threads' parameter:
 * 1) quiescent state counter array
 * 2) registered thread ID array
 */
struct rte_rcu_qsbr {
	uint64_t token __rte_cache_aligned;
	/**< Counter to allow for multiple concurrent quiescent state
	 *   queries.
	 */
	uint64_t acked_token;
	/**< Least token acked by all the threads in the last call to
	 *   the rte_rcu_qsbr_check API.
	 */

	uint32_t num_elems __rte_cache_aligned;
	/**< Number of elements in the thread ID array. */
	uint32_t num_threads;
	/**< Number of threads currently using this QS variable. */
	uint32_t max_threads;
	/**< Maximum number of threads using this QS variable. */

	struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
	/**< Quiescent state counter array of 'max_threads' elements.
	 *   The registered thread ID bitmap array follows this array
	 *   in memory.
	 */
} __rte_cache_aligned;
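/*
 * Sizing sketch (illustrative): because of the variable-size tail, a QS
 * variable must be allocated using the size reported by
 * rte_rcu_qsbr_get_memsize(), which is roughly:
 *
 *	size = sizeof(struct rte_rcu_qsbr)
 *		+ max_threads * sizeof(struct rte_rcu_qsbr_cnt)
 *		+ __RTE_QSBR_THRID_ARRAY_SIZE(max_threads);
 */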
#define RTE_RCU_QSBR_DQ_NAMESIZE RTE_RING_NAMESIZE

/**< Enqueue and reclaim operations are multi-thread safe by default.
 *   Set this flag if multi-thread safety is not required.
 */
#define RTE_RCU_QSBR_DQ_MT_UNSAFE 1

/**
 * Callback function provided by the application to free the resources
 * deleted from the data structure.
 */
typedef void (*rte_rcu_qsbr_free_resource_t)(void *p, void *e,
	unsigned int n);

/**
 * Parameters used when creating the defer queue.
 */
struct rte_rcu_qsbr_dq_parameters {
	const char *name;
	/**< Name of the defer queue. */
	uint32_t flags;
	/**< Flags to control API behaviors. */
	uint32_t size;
	/**< Number of entries in the defer queue. */
	uint32_t esize;
	/**< Size (in bytes) of each element in the defer queue. */
	uint32_t trigger_reclaim_limit;
	/**< Trigger automatic reclamation after the defer queue has at
	 *   least these many resources waiting.
	 */
	uint32_t max_reclaim_size;
	/**< Maximum number of resources to reclaim in one go. */
	rte_rcu_qsbr_free_resource_t free_fn;
	/**< Function to call to free the resource. */
	void *p;
	/**< Pointer passed to the free function. */
	struct rte_rcu_qsbr *v;
	/**< RCU QSBR variable to use for this defer queue. */
};
/* RTE defer queue structure.
 * The defer queue is used to hold the deleted entries from the data
 * structure that are not yet freed.
 */
struct rte_rcu_qsbr_dq;

/**
 * Return the size of the memory occupied by a Quiescent State variable.
 */
size_t
rte_rcu_qsbr_get_memsize(uint32_t max_threads);

/**
 * Initialize a Quiescent State (QS) variable.
 */
int
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);

/**
 * Register a reader thread to report its quiescent state on a
 * QS variable.
 */
int
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);

/**
 * Remove a reader thread from the list of threads reporting their
 * quiescent state on a QS variable.
 */
int
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v,
	unsigned int thread_id);
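/*
 * Setup sketch (illustrative; assumes rte_malloc.h is available and
 * names like 'v' and 'thread_id' are application-defined):
 *
 *	uint32_t max_threads = RTE_MAX_LCORE;
 *	struct rte_rcu_qsbr *v = rte_zmalloc(NULL,
 *		rte_rcu_qsbr_get_memsize(max_threads), RTE_CACHE_LINE_SIZE);
 *	rte_rcu_qsbr_init(v, max_threads);
 *
 *	(each reader thread, once, before reporting quiescent states)
 *	rte_rcu_qsbr_thread_register(v, thread_id);
 */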
/**
 * Add a registered reader thread to the list of threads reporting their
 * quiescent state on a QS variable.
 */
static __rte_always_inline void
rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Copy the current value of the token.
	 * The fence at the end of the function will ensure that
	 * the following will not move down after the load of any shared
	 * data structure.
	 */
	t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);

	/* __atomic_store_n(cnt, __ATOMIC_RELAXED) is used to ensure
	 * 'cnt' (64b) is accessed atomically.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
		t, __ATOMIC_RELAXED);

	/* The subsequent load of the data structure should not move above
	 * the store to the quiescent state counter, hence a store-load
	 * barrier is required. Otherwise the writer might not see that
	 * the reader is online, even though the reader is referencing
	 * the shared data structure.
	 */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
}
/**
 * Remove a registered reader thread from the list of threads reporting
 * their quiescent state on a QS variable.
 */
static __rte_always_inline void
rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* The reader can go offline only after the load of the data
	 * structure is completed, i.e. any load of the data structure
	 * can not move after this store.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
		__RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
}
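/*
 * Reader-side sketch (illustrative only): a reader that is not polling
 * continuously can bracket its accesses with the online/offline APIs and
 * report a quiescent state once per iteration:
 *
 *	rte_rcu_qsbr_thread_online(v, thread_id);
 *	while (keep_running) {          (hypothetical application flag)
 *		... read the shared data structure ...
 *		rte_rcu_qsbr_quiescent(v, thread_id);
 *	}
 *	rte_rcu_qsbr_thread_offline(v, thread_id);
 */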
/**
 * Acquire a lock for accessing a shared data structure.
 * A no-op unless RTE_LIBRTE_RCU_DEBUG is enabled; used to aid debugging
 * of lock/unlock pairing.
 */
static __rte_always_inline void
rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Increment the lock counter */
	__atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_ACQUIRE);
#endif
}
/**
 * Release a lock after accessing a shared data structure.
 * A no-op unless RTE_LIBRTE_RCU_DEBUG is enabled.
 */
static __rte_always_inline void
rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Decrement the lock counter */
	__atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_RELEASE);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
				"Lock counter %u. Nested locks?\n",
				v->qsbr_cnt[thread_id].lock_cnt);
#endif
}
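/*
 * Debug-aid sketch (illustrative only): wrapping each critical section
 * in lock/unlock lets an RTE_LIBRTE_RCU_DEBUG build flag a thread that
 * reports a quiescent state while still inside a critical section:
 *
 *	rte_rcu_qsbr_lock(v, thread_id);
 *	... dereference RCU-protected pointers ...
 *	rte_rcu_qsbr_unlock(v, thread_id);
 *	rte_rcu_qsbr_quiescent(v, thread_id);   (lock_cnt must be 0 here)
 */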
/**
 * Ask the reader threads to report their quiescent state status.
 * Returns a token that the writer can later pass to rte_rcu_qsbr_check.
 */
static __rte_always_inline uint64_t
rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
{
	uint64_t t;

	RTE_ASSERT(v != NULL);

	/* Release the changes to the shared data structure.
	 * This store release will ensure that changes to any data
	 * structure are visible to the workers before the token
	 * update is visible.
	 */
	t = __atomic_add_fetch(&v->token, 1, __ATOMIC_RELEASE);

	return t;
}
/**
 * Update the quiescent state for a reader thread.
 */
static __rte_always_inline void
rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Acquire the changes to the shared data structure released
	 * by rte_rcu_qsbr_start.
	 * Later loads of the shared data structure should not move
	 * above this load. Hence, use load-acquire.
	 */
	t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);

	/* Check if there are updates available from the writer.
	 * Inform the writer that updates are visible to this reader.
	 * Prior loads of the shared data structure should not move
	 * beyond this store. Hence use store-release.
	 */
	if (t != __atomic_load_n(&v->qsbr_cnt[thread_id].cnt, __ATOMIC_RELAXED))
		__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
					t, __ATOMIC_RELEASE);

	__RTE_RCU_DP_LOG(DEBUG,
		"%s: update: token = %" PRIu64 ", Thread ID = %d",
		__func__, t, thread_id);
}
/* Check the quiescent state counter for registered threads only, assuming
 * that not all the registered threads are online.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i, j, id;
	uint64_t bmap;
	uint64_t c;
	uint64_t *reg_thread_id;
	uint64_t acked_token = __RTE_QSBR_CNT_MAX;

	for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
		i < v->num_elems;
		i++, reg_thread_id++) {
		/* Load the current registered thread bitmap before
		 * loading the reader thread quiescent state counters.
		 */
		bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;

		while (bmap) {
			j = __builtin_ctzl(bmap);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: check: token = %" PRIu64 ", wait = %d, Bit Map = 0x%" PRIx64 ", Thread ID = %d",
				__func__, t, wait, bmap, id + j);
			c = __atomic_load_n(
					&v->qsbr_cnt[id + j].cnt,
					__ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
				__func__, t, wait, c, id + j);

			/* Counter is not checked for wrap-around condition
			 * as it is a 64b counter.
			 */
			if (unlikely(c !=
				__RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
				/* This thread is not in quiescent state */
				if (!wait)
					return 0;

				rte_pause();
				/* This thread might have unregistered.
				 * Re-read the bitmap.
				 */
				bmap = __atomic_load_n(reg_thread_id,
						__ATOMIC_ACQUIRE);

				continue;
			}

			/* This thread is in quiescent state. Use the counter
			 * to find the least acknowledged token among all the
			 * readers.
			 */
			if (c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c)
				acked_token = c;

			bmap &= ~(1UL << j);
		}
	}

	/* All readers are checked, update the least acknowledged token.
	 * There might be multiple writers trying to update this; it does
	 * not need to be exact, so compare-and-swap is not required.
	 */
	if (acked_token != __RTE_QSBR_CNT_MAX)
		__atomic_store_n(&v->acked_token, acked_token,
			__ATOMIC_RELAXED);

	return 1;
}
/* Check the quiescent state counter for all threads, assuming that
 * all the threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i;
	struct rte_rcu_qsbr_cnt *cnt;
	uint64_t c;
	uint64_t acked_token = __RTE_QSBR_CNT_MAX;

	for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %" PRIu64 ", wait = %d, Thread ID = %d",
			__func__, t, wait, i);
		while (1) {
			c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
				__func__, t, wait, c, i);

			/* Counter is not checked for wrap-around condition
			 * as it is a 64b counter.
			 */
			if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
				break;

			/* This thread is not in quiescent state */
			if (!wait)
				return 0;

			rte_pause();
		}

		/* This thread is in quiescent state. Use the counter to find
		 * the least acknowledged token among all the readers.
		 */
		if (likely(c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c))
			acked_token = c;
	}

	/* All readers are checked, update the least acknowledged token.
	 * There might be multiple writers trying to update this; it does
	 * not need to be exact, so compare-and-swap is not required.
	 */
	if (acked_token != __RTE_QSBR_CNT_MAX)
		__atomic_store_n(&v->acked_token, acked_token,
			__ATOMIC_RELAXED);

	return 1;
}
/**
 * Check if all the reader threads have entered the quiescent state
 * referenced by the token.
 */
static __rte_always_inline int
rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	RTE_ASSERT(v != NULL);

	/* Check if all the readers have already acknowledged this token */
	if (likely(t <= v->acked_token)) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %" PRIu64 ", wait = %d",
			__func__, t, wait);
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: status: least acked token = %" PRIu64,
			__func__, v->acked_token);
		return 1;
	}

	if (likely(v->num_threads == v->max_threads))
		return __rte_rcu_qsbr_check_all(v, t, wait);
	else
		return __rte_rcu_qsbr_check_selective(v, t, wait);
}
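/*
 * Writer-side sketch (illustrative only; 'remove_entry' and 'free_entry'
 * are hypothetical application helpers): unlink the element, start the
 * grace period, wait for all readers to acknowledge the token, then free:
 *
 *	uint64_t token;
 *
 *	remove_entry(ds, e);                    (unlink e from ds)
 *	token = rte_rcu_qsbr_start(v);
 *	rte_rcu_qsbr_check(v, token, true);     (blocks until safe)
 *	free_entry(e);
 */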
/**
 * Wait until all the reader threads have entered the quiescent state.
 */
void
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);

/**
 * Dump the details of a single QS variable to a file.
 */
int
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);

/**
 * Create a queue used to store the data structure elements that can
 * be freed later. This queue is referred to as the 'defer queue'.
 */
__rte_experimental
struct rte_rcu_qsbr_dq *
rte_rcu_qsbr_dq_create(const struct rte_rcu_qsbr_dq_parameters *params);
/**
 * Enqueue one resource to the defer queue and start the grace period.
 */
__rte_experimental
int
rte_rcu_qsbr_dq_enqueue(struct rte_rcu_qsbr_dq *dq, void *e);

/**
 * Free the resources in the defer queue whose grace period has elapsed.
 */
__rte_experimental
int
rte_rcu_qsbr_dq_reclaim(struct rte_rcu_qsbr_dq *dq, unsigned int n,
	unsigned int *freed, unsigned int *pending, unsigned int *available);

/**
 * Delete the defer queue.
 */
__rte_experimental
int
rte_rcu_qsbr_dq_delete(struct rte_rcu_qsbr_dq *dq);
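/*
 * Defer-queue sketch (illustrative only; parameter values and the
 * 'my_free_fn' callback are hypothetical): instead of blocking in
 * rte_rcu_qsbr_check, the writer can enqueue deleted elements and let
 * the library reclaim them once the grace period has elapsed:
 *
 *	struct rte_rcu_qsbr_dq_parameters params = {
 *		.name = "example_dq",
 *		.size = 1024,
 *		.esize = sizeof(void *),
 *		.trigger_reclaim_limit = 64,
 *		.max_reclaim_size = 32,
 *		.free_fn = my_free_fn,
 *		.p = ds,
 *		.v = v,
 *	};
 *	struct rte_rcu_qsbr_dq *dq = rte_rcu_qsbr_dq_create(&params);
 *	rte_rcu_qsbr_dq_enqueue(dq, &e);   (e is copied, esize bytes)
 *	...
 *	rte_rcu_qsbr_dq_delete(dq);
 */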
#endif /* _RTE_RCU_QSBR_H_ */