#ifndef _RTE_RCU_QSBR_H_
#define _RTE_RCU_QSBR_H_

extern int rte_rcu_log_type;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define __RTE_RCU_DP_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
		"%s(): " fmt "\n", __func__, ## args)
#else
#define __RTE_RCU_DP_LOG(level, fmt, args...)
#endif
#if defined(RTE_LIBRTE_RCU_DEBUG)
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do { \
	if (v->qsbr_cnt[thread_id].lock_cnt) \
		rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
			"%s(): " fmt "\n", __func__, ## args); \
} while (0)
#else
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
#endif
/* Registered thread IDs are stored as a bitmap of 64b elements. A thread ID
 * selects an element of this array (upper bits) and a bit within that
 * element (lower 6 bits).
 */
#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
#define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
	RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
		__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
	((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
#define __RTE_QSBR_THRID_INDEX_SHIFT 6
#define __RTE_QSBR_THRID_MASK 0x3f
#define RTE_QSBR_THRID_INVALID 0xffffffff
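/* Illustrative sketch, not part of the API: how a thread ID maps onto the
 * registered-thread bitmap using the macros above. The helper name and
 * variables are hypothetical.
 */
static inline void
example_thrid_decompose(unsigned int thread_id,
		unsigned int *elm_idx, unsigned int *bit_pos)
{
	/* Each 64b element holds 64 thread bits: the upper bits of thread_id
	 * select the array element, the lower 6 bits select the bit.
	 */
	*elm_idx = thread_id >> __RTE_QSBR_THRID_INDEX_SHIFT;
	*bit_pos = thread_id & __RTE_QSBR_THRID_MASK;
}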
/* Per-thread quiescent state counter. */
struct rte_rcu_qsbr_cnt {
	uint64_t cnt;
	/**< Quiescent state counter. Value 0 indicates the thread is offline. */
	uint32_t lock_cnt;
	/**< Lock nesting counter. Used when RTE_LIBRTE_RCU_DEBUG is enabled. */
} __rte_cache_aligned;

#define __RTE_QSBR_CNT_THR_OFFLINE 0
#define __RTE_QSBR_CNT_INIT 1

/* RTE quiescent state variable structure. The registered thread ID bitmap
 * is stored in memory immediately after the counter array.
 */
struct rte_rcu_qsbr {
	uint64_t token __rte_cache_aligned;
	/**< Counter to allow for multiple concurrent quiescent state queries. */

	uint32_t num_elems __rte_cache_aligned;
	/**< Number of elements in the thread ID bitmap array. */
	uint32_t num_threads;
	/**< Number of threads currently using this QS variable. */
	uint32_t max_threads;
	/**< Maximum number of threads using this QS variable. */

	struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
	/**< Quiescent state counter array of 'max_threads' elements. */
} __rte_cache_aligned;
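/* Illustrative usage sketch, not part of this header: allocate and
 * initialize a QS variable sized for 'max_threads' readers. Assumes
 * rte_malloc.h is included; the function name is hypothetical and error
 * handling is minimal.
 */
static inline struct rte_rcu_qsbr *
example_qsbr_alloc(uint32_t max_threads)
{
	size_t sz;
	struct rte_rcu_qsbr *v;

	/* Size covers the per-thread counter array and the thread ID bitmap. */
	sz = rte_rcu_qsbr_get_memsize(max_threads);
	v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (v == NULL)
		return NULL;

	if (rte_rcu_qsbr_init(v, max_threads) != 0) {
		rte_free(v);
		return NULL;
	}
	return v;
}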
/* Add a registered reader thread to the list of threads reporting their
 * quiescent state on a QS variable.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Copy the current token and report it as this thread's counter;
	 * the relaxed store is enough to make the 64b write atomic.
	 */
	t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
		t, __ATOMIC_RELAXED);

	/* Store-load barrier: subsequent loads of the shared data structure
	 * must not move above the store of 'cnt', otherwise the writer may
	 * not see that this reader is online.
	 */
#ifdef RTE_ARCH_X86_64
	/* rte_smp_mb() is cheaper than a SEQ_CST fence on x86. */
	rte_smp_mb();
#else
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
}
/* Remove a registered reader thread from the list of threads reporting
 * their quiescent state on a QS variable.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* The reader can go offline only after its loads of the shared data
	 * structure complete; the store-release keeps them from moving below.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
		__RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
}
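/* Illustrative reader-side sketch, not part of this header: a registered
 * reader goes online before dereferencing the shared data structure and
 * offline once it no longer holds any references. The function name is
 * hypothetical.
 */
static inline void
example_reader_access(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	rte_rcu_qsbr_thread_online(v, thread_id);

	/* ... read the shared data structure here ... */

	rte_rcu_qsbr_thread_offline(v, thread_id);
}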
/* Acquire a lock for accessing a shared data structure. This is a
 * debug-build annotation only; it does not block.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Increment the lock nesting counter. */
	__atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_ACQUIRE);
#endif
}
/* Release a lock after accessing a shared data structure. This is a
 * debug-build annotation only; it warns if nested locks are still held.
 */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Decrement the lock nesting counter. */
	__atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_RELEASE);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
				"Lock counter %u. Nested locks?\n",
				v->qsbr_cnt[thread_id].lock_cnt);
#endif
}
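/* Illustrative sketch, not part of this header: lock()/unlock() bracket a
 * critical section so that, with RTE_LIBRTE_RCU_DEBUG enabled, a reader
 * reporting a quiescent state while still holding a reference can be
 * detected. The function name is hypothetical.
 */
static inline void
example_annotated_access(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	rte_rcu_qsbr_lock(v, thread_id);

	/* ... access the shared data structure ... */

	rte_rcu_qsbr_unlock(v, thread_id);
}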
/* Ask the reader threads to report their quiescent state. Returns the token
 * to be checked or waited for with rte_rcu_qsbr_check().
 */
static __rte_experimental __rte_always_inline uint64_t
rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
{
	uint64_t t;

	RTE_ASSERT(v != NULL);

	/* The store-release makes prior changes to the shared data structure
	 * visible to the readers before the new token is visible.
	 */
	t = __atomic_add_fetch(&v->token, 1, __ATOMIC_RELEASE);

	return t;
}
/* Update the quiescent state for a reader thread. */
static __rte_experimental __rte_always_inline void
rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Acquire the changes to the shared data structure released by
	 * rte_rcu_qsbr_start().
	 */
	t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);

	/* Inform the writer that updates are visible to this reader.
	 * Prior loads of the shared data structure must not move past this
	 * store-release.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
			 t, __ATOMIC_RELEASE);

	__RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %"PRIu64", Thread ID = %d",
		__func__, t, thread_id);
}
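/* Illustrative reader loop sketch, not part of this header: a reader
 * reports a quiescent state between bursts of work, once it holds no
 * references to the shared data structure. 'run' and the function name are
 * hypothetical.
 */
static inline void
example_reader_loop(struct rte_rcu_qsbr *v, unsigned int thread_id,
		volatile int *run)
{
	while (*run) {
		/* ... process one burst using the shared data structure ... */

		/* No references are held at this point; report it. */
		rte_rcu_qsbr_quiescent(v, thread_id);
	}
}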
/* Check the quiescent state counters for registered threads only, assuming
 * that not all threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i, j, id;
	uint64_t bmap;
	uint64_t c;
	uint64_t *reg_thread_id;

	for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
		i < v->num_elems;
		i++, reg_thread_id++) {
		/* Load the registered thread bitmap before loading the
		 * reader thread quiescent state counters.
		 */
		bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;

		while (bmap) {
			j = __builtin_ctzl(bmap);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: check: token = %"PRIu64", wait = %d, Bit Map = 0x%"PRIx64", Thread ID = %d",
				__func__, t, wait, bmap, id + j);
			c = __atomic_load_n(
					&v->qsbr_cnt[id + j].cnt,
					__ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
				__func__, t, wait, c, id + j);
			/* The counter is not checked for wrap-around as it
			 * is a 64b counter.
			 */
			if (unlikely(c != __RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
				/* This thread is not in quiescent state */
				if (!wait)
					return 0;

				rte_pause();
				/* This thread might have unregistered.
				 * Re-read the bitmap.
				 */
				bmap = __atomic_load_n(reg_thread_id,
						__ATOMIC_ACQUIRE);

				continue;
			}

			/* This thread is in quiescent state, clear its bit. */
			bmap &= ~(1UL << j);
		}
	}

	return 1;
}
/* Check the quiescent state counters for all threads, assuming that
 * all the threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i;
	struct rte_rcu_qsbr_cnt *cnt;
	uint64_t c;

	for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %"PRIu64", wait = %d, Thread ID = %d",
			__func__, t, wait, i);
		while (1) {
			c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
				__func__, t, wait, c, i);
			/* The counter is not checked for wrap-around as it
			 * is a 64b counter.
			 */
			if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
				break;

			/* This thread is not in quiescent state */
			if (!wait)
				return 0;

			rte_pause();
		}
	}

	return 1;
}
/* Check whether all the readers have passed through a quiescent state since
 * the given token was returned by rte_rcu_qsbr_start(). Returns 1 if they
 * have, 0 otherwise (only possible when 'wait' is false).
 */
static __rte_experimental __rte_always_inline int
rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	RTE_ASSERT(v != NULL);

	if (likely(v->num_threads == v->max_threads))
		return __rte_rcu_qsbr_check_all(v, t, wait);
	else
		return __rte_rcu_qsbr_check_selective(v, t, wait);
}
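/* Illustrative writer-side sketch, not part of this header: after unlinking
 * an element from the shared data structure, wait for every reader to pass
 * through a quiescent state (or go offline) before freeing it. Assumes the
 * element was allocated with rte_malloc; the function name is hypothetical.
 * rte_rcu_qsbr_synchronize(v, RTE_QSBR_THRID_INVALID) is the blocking
 * shorthand for the start + check(wait = true) pair used below.
 */
static inline void
example_writer_free(struct rte_rcu_qsbr *v, void *elem)
{
	uint64_t t;

	/* 'elem' has already been removed from the shared data structure. */

	/* Record a new token; readers that report it can no longer hold a
	 * reference to 'elem'.
	 */
	t = rte_rcu_qsbr_start(v);

	/* Block until all registered readers have reported the token or
	 * gone offline.
	 */
	rte_rcu_qsbr_check(v, t, true);

	rte_free(elem);
}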
/* Public (non-inline) API declarations. */

__rte_experimental
size_t rte_rcu_qsbr_get_memsize(uint32_t max_threads);

__rte_experimental
int rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);

__rte_experimental
int rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);

__rte_experimental
int rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);

__rte_experimental
void rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);

__rte_experimental
int rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);

#endif /* _RTE_RCU_QSBR_H_ */