#ifndef _RTE_RCU_QSBR_H_
#define _RTE_RCU_QSBR_H_

/**
 * @file
 * RTE Quiescent State Based Reclamation (QSBR): readers report per-thread
 * quiescent states, writers start a grace period with a token and wait for
 * all registered readers to acknowledge it before reclaiming memory.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_atomic.h>

extern int rte_rcu_log_type;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
/* Data plane logging, compiled in only when RTE_LOG_DP_LEVEL >= DEBUG */
#define __RTE_RCU_DP_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
		"%s(): " fmt "\n", __func__, ## args)
#else
#define __RTE_RCU_DP_LOG(level, fmt, args...)
#endif

#if defined(RTE_LIBRTE_RCU_DEBUG)
/* Log a message if the lock counter of the given thread is not zero */
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do {\
	if (v->qsbr_cnt[thread_id].lock_cnt) \
		rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
			"%s(): " fmt "\n", __func__, ## args); \
} while (0)
#else
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
#endif

/* Registered thread IDs are stored as a bitmap in an array of 64b elements.
 * A given thread ID is converted into an index into the array and a bit
 * position within that array element.
 */
#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
#define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
	RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
		__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
	((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
#define __RTE_QSBR_THRID_INDEX_SHIFT 6
#define __RTE_QSBR_THRID_MASK 0x3f
#define RTE_QSBR_THRID_INVALID 0xffffffff

/* Worker thread counter */
struct rte_rcu_qsbr_cnt {
	uint64_t cnt;
	/**< Quiescent state counter. Value 0 indicates the thread is offline;
	 *   a 64b counter avoids handling overflow.
	 */
	uint32_t lock_cnt;
	/**< Lock counter. Used when RTE_LIBRTE_RCU_DEBUG is enabled */
} __rte_cache_aligned;
#define __RTE_QSBR_CNT_THR_OFFLINE 0
#define __RTE_QSBR_CNT_INIT 1
#define __RTE_QSBR_CNT_MAX ((uint64_t)~0)

/* RTE Quiescent State variable structure.
 * The per-thread counter array and the registered thread ID bitmap follow
 * this structure in memory; their sizes depend on 'max_threads'.
 */
struct rte_rcu_qsbr {
	uint64_t token __rte_cache_aligned;
	/**< Counter to allow for multiple concurrent quiescent state queries */
	uint64_t acked_token;
	/**< Least token acked by all threads in the last call to
	 *   rte_rcu_qsbr_check.
	 */

	uint32_t num_elems __rte_cache_aligned;
	/**< Number of elements in the thread ID bitmap array */
	uint32_t num_threads;
	/**< Number of threads currently using this QS variable */
	uint32_t max_threads;
	/**< Maximum number of threads using this QS variable */

	struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
	/**< Quiescent state counter array of 'max_threads' elements,
	 *   followed by the registered thread ID bitmap array.
	 */
} __rte_cache_aligned;
/* Add a registered reader thread to the list of threads reporting their
 * quiescent state on the QS variable.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Copy the current value of the token */
	t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);

	/* Relaxed store is sufficient; only atomicity of the 64b counter
	 * access is required here.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt, t, __ATOMIC_RELAXED);

	/* Subsequent loads of the shared data structure must not move above
	 * this store, hence a store-load barrier is required. Otherwise the
	 * writer might not see that the reader is online.
	 */
#ifdef RTE_ARCH_X86_64
	/* rte_smp_mb() is lighter than a SEQ_CST fence on x86 */
	rte_smp_mb();
#else
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
}
/* Remove a registered reader thread from the list of threads reporting
 * their quiescent state on the QS variable.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* The reader can go offline only after its loads of the shared data
	 * structure are complete; the release store orders them before it.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
		__RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
}
/* Acquire a lock for accessing a shared data structure. This is a debugging
 * aid: it is a no-op unless RTE_LIBRTE_RCU_DEBUG is defined.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Increment the lock counter */
	__atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_ACQUIRE);
#endif
}
/* Release a lock after accessing a shared data structure. Like the lock,
 * it is a no-op unless RTE_LIBRTE_RCU_DEBUG is defined.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Decrement the lock counter */
	__atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_RELEASE);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
				"Lock counter %u. Nested locks?\n",
				v->qsbr_cnt[thread_id].lock_cnt);
#endif
}
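/* Usage sketch, not part of this header: how the debug lock/unlock helpers
 * bracket a read-side critical section. The function name
 * 'example_debug_critical_section' and the 'example_access' callback are
 * placeholders for illustration only, and the guard macro
 * RTE_RCU_QSBR_USAGE_EXAMPLE is hypothetical (never defined by DPDK).
 */
#ifdef RTE_RCU_QSBR_USAGE_EXAMPLE
static void
example_debug_critical_section(struct rte_rcu_qsbr *v, unsigned int thread_id,
		void (*example_access)(void))
{
	/* With RTE_LIBRTE_RCU_DEBUG enabled, reporting a quiescent state
	 * between lock and unlock is caught by the lock counter checks.
	 */
	rte_rcu_qsbr_lock(v, thread_id);
	example_access();	/* access the shared data structure */
	rte_rcu_qsbr_unlock(v, thread_id);
}
#endif /* RTE_RCU_QSBR_USAGE_EXAMPLE */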
/* Start a new grace period: ask the worker threads to report their
 * quiescent state for the returned token.
 */
static __rte_experimental __rte_always_inline uint64_t
rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
{
	uint64_t t;

	RTE_ASSERT(v != NULL);

	/* Release the changes to the shared data structure.
	 * This store-release ensures that the changes are visible to the
	 * workers before the token update is visible.
	 */
	t = __atomic_add_fetch(&v->token, 1, __ATOMIC_RELEASE);

	return t;
}
/* Update the quiescent state for a reader thread */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Acquire the changes to the shared data structure released
	 * by rte_rcu_qsbr_start().
	 */
	t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);

	/* Inform the writer that updates are visible to this reader.
	 * Prior loads of the shared data structure must not move beyond
	 * this store, hence store-release.
	 */
	if (t != __atomic_load_n(&v->qsbr_cnt[thread_id].cnt, __ATOMIC_RELAXED))
		__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
					 t, __ATOMIC_RELEASE);

	__RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %" PRIu64
		", Thread ID = %d", __func__, t, thread_id);
}
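/* Usage sketch, not part of this header: a reader thread's typical work
 * loop. The thread is assumed to be registered already via
 * rte_rcu_qsbr_thread_register(). 'example_reader_loop', 'example_lookup'
 * and 'running' are placeholders, and the guard macro
 * RTE_RCU_QSBR_USAGE_EXAMPLE is hypothetical (never defined by DPDK).
 */
#ifdef RTE_RCU_QSBR_USAGE_EXAMPLE
static void
example_reader_loop(struct rte_rcu_qsbr *v, unsigned int thread_id,
		volatile int *running, void (*example_lookup)(void))
{
	/* Mark the thread online before referencing shared data */
	rte_rcu_qsbr_thread_online(v, thread_id);

	while (*running) {
		/* Read-side critical section on the shared structure */
		example_lookup();

		/* Report a quiescent state once no references are held */
		rte_rcu_qsbr_quiescent(v, thread_id);
	}

	/* Stop participating in quiescent state queries */
	rte_rcu_qsbr_thread_offline(v, thread_id);
}
#endif /* RTE_RCU_QSBR_USAGE_EXAMPLE */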
/* Check the quiescent state counter for registered threads only, assuming
 * that not all threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i, j, id;
	uint64_t bmap, c;
	uint64_t *reg_thread_id;
	uint64_t acked_token = __RTE_QSBR_CNT_MAX;

	for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
		i < v->num_elems;
		i++, reg_thread_id++) {
		/* Load the registered thread bitmap before loading the
		 * reader thread quiescent state counters.
		 */
		bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;

		while (bmap) {
			j = __builtin_ctzl(bmap);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: check: token = %" PRIu64 ", wait = %d, Bit Map = 0x%" PRIx64 ", Thread ID = %d",
				__func__, t, wait, bmap, id + j);
			c = __atomic_load_n(&v->qsbr_cnt[id + j].cnt,
					__ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
				__func__, t, wait, c, id + j);

			/* The 64b counter is not checked for wrap-around */
			if (unlikely(c !=
				__RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
				/* This thread is not in quiescent state */
				if (!wait)
					return 0;

				rte_pause();
				/* This thread might have unregistered.
				 * Re-read the bitmap.
				 */
				bmap = __atomic_load_n(reg_thread_id,
						__ATOMIC_ACQUIRE);
				continue;
			}

			/* Track the least acknowledged token among readers */
			if (c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c)
				acked_token = c;

			bmap &= ~(1UL << j);
		}
	}

	/* All readers checked; update the least acknowledged token. Multiple
	 * writers may race here, exactness is not required.
	 */
	if (acked_token != __RTE_QSBR_CNT_MAX)
		__atomic_store_n(&v->acked_token, acked_token,
			__ATOMIC_RELAXED);

	return 1;
}
/* Check the quiescent state counter for all threads, assuming that
 * all the threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i;
	struct rte_rcu_qsbr_cnt *cnt;
	uint64_t c;
	uint64_t acked_token = __RTE_QSBR_CNT_MAX;

	for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %" PRIu64 ", wait = %d, Thread ID = %d",
			__func__, t, wait, i);
		while (1) {
			c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
				__func__, t, wait, c, i);

			/* The 64b counter is not checked for wrap-around */
			if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
				break;

			/* This thread is not in quiescent state */
			if (!wait)
				return 0;

			rte_pause();
		}

		/* Track the least acknowledged token among readers */
		if (likely(c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c))
			acked_token = c;
	}

	/* All readers checked; update the least acknowledged token. Multiple
	 * writers may race here, exactness is not required.
	 */
	if (acked_token != __RTE_QSBR_CNT_MAX)
		__atomic_store_n(&v->acked_token, acked_token,
			__ATOMIC_RELAXED);

	return 1;
}
/* Check if all the readers have entered the quiescent state referenced by
 * token 't'. If 'wait' is true, block until they have.
 */
static __rte_experimental __rte_always_inline int
rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	RTE_ASSERT(v != NULL);

	/* Check if all the readers have already acknowledged this token */
	if (likely(t <= v->acked_token))
		return 1;

	if (likely(v->num_threads == v->max_threads))
		return __rte_rcu_qsbr_check_all(v, t, wait);
	else
		return __rte_rcu_qsbr_check_selective(v, t, wait);
}
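/* Usage sketch, not part of this header: writer-side reclamation. After
 * unlinking an element so new readers cannot reach it, the writer waits
 * for a grace period before freeing it. 'example_writer_reclaim',
 * 'example_free' and 'elem' are placeholders, and the guard macro
 * RTE_RCU_QSBR_USAGE_EXAMPLE is hypothetical (never defined by DPDK).
 */
#ifdef RTE_RCU_QSBR_USAGE_EXAMPLE
static void
example_writer_reclaim(struct rte_rcu_qsbr *v, void *elem,
		void (*example_free)(void *))
{
	uint64_t token;

	/* 'elem' must already be removed from the shared data structure */

	/* Start a new grace period and get the token to wait on */
	token = rte_rcu_qsbr_start(v);

	/* Block (wait == true) until every registered online reader has
	 * reported a quiescent state counter >= token.
	 */
	rte_rcu_qsbr_check(v, token, true);

	/* No reader can still hold a reference; safe to reclaim */
	example_free(elem);
}
#endif /* RTE_RCU_QSBR_USAGE_EXAMPLE */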
/* APIs implemented in the RCU library (not inline) */

__rte_experimental
size_t
rte_rcu_qsbr_get_memsize(uint32_t max_threads);

__rte_experimental
int
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);

__rte_experimental
int
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);

__rte_experimental
int
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);

__rte_experimental
void
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);

__rte_experimental
int
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);
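/* Usage sketch, not part of this header: allocating and initializing a QS
 * variable sized for 'max_threads'. Assumes <rte_malloc.h> for rte_zmalloc()
 * and rte_free(); 'example_qsbr_alloc' is a placeholder name and the guard
 * macro RTE_RCU_QSBR_USAGE_EXAMPLE is hypothetical (never defined by DPDK).
 */
#ifdef RTE_RCU_QSBR_USAGE_EXAMPLE
#include <rte_malloc.h>

static struct rte_rcu_qsbr *
example_qsbr_alloc(uint32_t max_threads)
{
	struct rte_rcu_qsbr *v;
	size_t sz;

	/* Size covers the structure, the per-thread counter array and the
	 * registered thread ID bitmap.
	 */
	sz = rte_rcu_qsbr_get_memsize(max_threads);

	v = rte_zmalloc("rcu_qsbr", sz, RTE_CACHE_LINE_SIZE);
	if (v == NULL)
		return NULL;

	if (rte_rcu_qsbr_init(v, max_threads) != 0) {
		rte_free(v);
		return NULL;
	}

	return v;
}
#endif /* RTE_RCU_QSBR_USAGE_EXAMPLE */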
#ifdef __cplusplus
}
#endif

#endif /* _RTE_RCU_QSBR_H_ */