#ifndef _RTE_RCU_QSBR_H_
#define _RTE_RCU_QSBR_H_

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_atomic.h>

extern int rte_rcu_log_type;
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define __RTE_RCU_DP_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
		"%s(): " fmt "\n", __func__, ## args)
#else
#define __RTE_RCU_DP_LOG(level, fmt, args...)
#endif
#if defined(RTE_LIBRTE_RCU_DEBUG)
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do { \
	if (v->qsbr_cnt[thread_id].lock_cnt) \
		rte_log(RTE_LOG_ ## level, rte_rcu_log_type, \
			"%s(): " fmt "\n", __func__, ## args); \
} while (0)
#else
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
#endif
/* Registered thread IDs are stored as a bitmap in an array of 64b
 * elements. A given thread ID is converted to an index into this array
 * and a bit position within that array element.
 */
#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8)
#define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
	RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
		__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \
	((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
#define __RTE_QSBR_THRID_INDEX_SHIFT 6
#define __RTE_QSBR_THRID_MASK 0x3f
#define RTE_QSBR_THRID_INVALID 0xffffffff
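/* Worked example (illustrative, not part of the API): with
 * __RTE_QSBR_THRID_INDEX_SHIFT = 6 and __RTE_QSBR_THRID_MASK = 0x3f,
 * thread_id 70 maps to element 1 of the bitmap array and bit 6 within it:
 *
 *	uint32_t thread_id = 70;
 *	uint32_t elem = thread_id >> __RTE_QSBR_THRID_INDEX_SHIFT;	// 1
 *	uint32_t bit = thread_id & __RTE_QSBR_THRID_MASK;		// 6
 *	uint64_t mask = 1UL << bit;		// bit to test/set in elem
 */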
/* Worker thread counter */
struct rte_rcu_qsbr_cnt {
	uint64_t cnt;
	/**< Quiescent state counter. Value 0 indicates the thread is offline.
	 *   A 64b counter is used to avoid handling counter overflow.
	 */
	uint32_t lock_cnt;
	/**< Lock counter. Used when RTE_LIBRTE_RCU_DEBUG is enabled */
} __rte_cache_aligned;
#define __RTE_QSBR_CNT_THR_OFFLINE 0
#define __RTE_QSBR_CNT_INIT 1
/* RTE Quiescent State variable structure.
 * This structure has two elements that vary in size based on the
 * 'max_threads' parameter:
 * 1) the quiescent state counter array
 * 2) the registered thread ID bitmap array
 */
struct rte_rcu_qsbr {
	uint64_t token __rte_cache_aligned;
	/**< Counter to allow for multiple concurrent quiescent state queries */

	uint32_t num_elems __rte_cache_aligned;
	/**< Number of elements in the thread ID bitmap array */
	uint32_t num_threads;
	/**< Number of threads currently using this QS variable */
	uint32_t max_threads;
	/**< Maximum number of threads using this QS variable */

	struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
	/**< Quiescent state counter array of 'max_threads' elements.
	 *   The registered thread ID bitmap array follows this array.
	 */
} __rte_cache_aligned;
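/* Memory layout sketch (illustrative): the counter array and the thread ID
 * bitmap follow the fixed-size part of the structure, so the total
 * allocation is, in effect:
 *
 *	size_t sz = sizeof(struct rte_rcu_qsbr) +
 *		max_threads * sizeof(struct rte_rcu_qsbr_cnt) +
 *		__RTE_QSBR_THRID_ARRAY_SIZE(max_threads);
 *
 * Applications should use rte_rcu_qsbr_get_memsize() below rather than
 * computing this by hand.
 */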
/**
 * Return the size of the memory occupied by a Quiescent State variable.
 */
size_t __rte_experimental
rte_rcu_qsbr_get_memsize(uint32_t max_threads);

/**
 * Initialize a Quiescent State (QS) variable.
 */
int __rte_experimental
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);

/**
 * Register a reader thread to report its quiescent state
 * on a QS variable.
 */
int __rte_experimental
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);

/**
 * Remove a reader thread from the list of threads reporting their
 * quiescent state on a QS variable.
 */
int __rte_experimental
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);
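/* Setup sketch (illustrative; error handling is elided and the choice of
 * 128 reader threads is an assumption of this example; rte_zmalloc() is
 * from rte_malloc.h):
 *
 *	uint32_t max_threads = 128;
 *	size_t sz = rte_rcu_qsbr_get_memsize(max_threads);
 *	struct rte_rcu_qsbr *v =
 *		rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	rte_rcu_qsbr_init(v, max_threads);
 *	rte_rcu_qsbr_thread_register(v, thread_id);
 */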
/**
 * Add a registered reader thread to the list of threads reporting their
 * quiescent state on a QS variable. This is a lock-free function.
 */
static __rte_always_inline void __rte_experimental
rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Copy the current value of the token.
	 * The fence at the end of this function ensures that loads of the
	 * shared data structure do not move above it.
	 */
	t = __atomic_load_n(&v->token, __ATOMIC_RELAXED);

	/* __ATOMIC_RELAXED is sufficient here; the store only needs to
	 * make the 64b 'cnt' update atomic.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
		t, __ATOMIC_RELAXED);

	/* The subsequent load of the shared data structure must not move
	 * above the store to 'cnt'; otherwise the writer might not see
	 * that this reader is online. Hence a store-load barrier is
	 * required.
	 */
#ifdef RTE_ARCH_X86_64
	/* rte_smp_mb() for x86 is lighter */
	rte_smp_mb();
#else
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
}
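/* Why the store-load barrier above matters (informal sketch, not from the
 * original source): without it, the reader's load of the shared data
 * structure could be reordered above its store to 'cnt', so the writer
 * could observe the reader as offline and reclaim memory the reader is
 * about to use:
 *
 *	Reader					Writer
 *	------					------
 *	load shared ptr (reordered up)
 *						delete element
 *						start() / check():
 *						  sees reader offline -> free
 *	store cnt = t (now "online")
 *	dereference freed pointer	<- the barrier prevents this
 */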
/**
 * Remove a registered reader thread from the list of threads reporting
 * their quiescent state on a QS variable. This is a lock-free function.
 */
static __rte_always_inline void __rte_experimental
rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* The reader can go offline only after its loads of the shared
	 * data structure have completed; no load may move below this
	 * store. Hence store-release.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
		__RTE_QSBR_CNT_THR_OFFLINE, __ATOMIC_RELEASE);
}
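/* Reader-side sketch for threads that block between bursts (illustrative;
 * process_burst() is a hypothetical application function):
 *
 *	rte_rcu_qsbr_thread_online(v, thread_id);
 *	process_burst();	// reads RCU-protected data
 *	rte_rcu_qsbr_thread_offline(v, thread_id);
 *	// the thread may now block or sleep without delaying writers
 */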
/**
 * Acquire a lock for debugging. Readers are expected to call this API
 * before accessing the shared data structure when RTE_LIBRTE_RCU_DEBUG
 * is enabled; otherwise it compiles to nothing.
 */
static __rte_always_inline void __rte_experimental
rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Increment the lock counter */
	__atomic_fetch_add(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_ACQUIRE);
#endif
}
/**
 * Release a debugging lock taken with rte_rcu_qsbr_lock(). A non-zero
 * lock counter after the decrement indicates unbalanced or nested locks.
 */
static __rte_always_inline void __rte_experimental
rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Decrement the lock counter */
	__atomic_fetch_sub(&v->qsbr_cnt[thread_id].lock_cnt,
				1, __ATOMIC_RELEASE);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
				"Lock counter %u. Nested locks?\n",
				v->qsbr_cnt[thread_id].lock_cnt);
#endif
}
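/* Debug-build sketch (illustrative; lookup() is a hypothetical reader-side
 * access): bracketing each critical section with lock/unlock lets the
 * __RTE_RCU_IS_LOCK_CNT_ZERO checks in the online/offline/quiescent APIs
 * flag a quiescent state reported from inside a critical section:
 *
 *	rte_rcu_qsbr_lock(v, thread_id);
 *	entry = lookup(table, key);	// RCU-protected access
 *	rte_rcu_qsbr_unlock(v, thread_id);
 */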
/**
 * Ask the reader threads to report their quiescent state. Returns the
 * token to be passed to rte_rcu_qsbr_check() for this request.
 */
static __rte_always_inline uint64_t __rte_experimental
rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
{
	uint64_t t;

	RTE_ASSERT(v != NULL);

	/* Release the changes to the shared data structure.
	 * This store-release ensures that those changes are visible to the
	 * reader threads before the updated token is visible.
	 */
	t = __atomic_add_fetch(&v->token, 1, __ATOMIC_RELEASE);

	return t;
}
/**
 * Update the quiescent state for a reader thread: the thread no longer
 * holds references acquired before this call.
 */
static __rte_always_inline void __rte_experimental
rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Acquire the changes to the shared data structure released by
	 * rte_rcu_qsbr_start(). Later loads of the data structure must
	 * not move above this load. Hence load-acquire.
	 */
	t = __atomic_load_n(&v->token, __ATOMIC_ACQUIRE);

	/* Inform the writer that updates are visible to this reader.
	 * Prior loads of the data structure must not move beyond this
	 * store. Hence store-release.
	 */
	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
			 t, __ATOMIC_RELEASE);

	__RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %"PRIu64
		", Thread ID = %d", __func__, t, thread_id);
}
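/* Data-plane reader sketch (illustrative; the body of the loop is a
 * hypothetical placeholder): report a quiescent state once per iteration,
 * after all references to the shared data have been dropped:
 *
 *	while (!quit) {
 *		rte_rcu_qsbr_lock(v, thread_id);
 *		// ... access RCU-protected data structures ...
 *		rte_rcu_qsbr_unlock(v, thread_id);
 *		rte_rcu_qsbr_quiescent(v, thread_id);
 *	}
 */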
/* Check the quiescent state counter for registered threads only, assuming
 * that not all threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i, j, id;
	uint64_t bmap;
	uint64_t c;
	uint64_t *reg_thread_id;

	for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
		i < v->num_elems;
		i++, reg_thread_id++) {
		/* Load the current registered thread bitmap before loading
		 * the reader thread quiescent state counters.
		 */
		bmap = __atomic_load_n(reg_thread_id, __ATOMIC_ACQUIRE);
		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;

		while (bmap) {
			j = __builtin_ctzl(bmap);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: check: token = %"PRIu64", wait = %d, Bit Map = 0x%"PRIx64", Thread ID = %d",
				__func__, t, wait, bmap, id + j);
			c = __atomic_load_n(
					&v->qsbr_cnt[id + j].cnt,
					__ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
				__func__, t, wait, c, id + j);
			/* Counter is not checked for wrap-around; it is
			 * a 64b counter.
			 */
			if (unlikely(c != __RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
				/* This thread is not in quiescent state */
				if (!wait)
					return 0;

				rte_pause();
				/* This thread might have unregistered.
				 * Re-read the bitmap.
				 */
				bmap = __atomic_load_n(reg_thread_id,
						__ATOMIC_ACQUIRE);

				continue;
			}

			bmap &= ~(1UL << j);
		}
	}

	return 1;
}
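/* Bitmap-walk example (illustrative): for bmap = 0x89 (bits 0, 3 and 7
 * set), __builtin_ctzl() visits thread IDs id + 0, id + 3 and id + 7 in
 * turn; each quiescent or offline thread's bit is cleared with
 * bmap &= ~(1UL << j), so the inner loop terminates once every registered
 * thread in this 64b element has caught up with the token or gone offline.
 */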
/* Check the quiescent state counter for all threads, assuming that
 * all the threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i;
	struct rte_rcu_qsbr_cnt *cnt;
	uint64_t c;

	for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %"PRIu64", wait = %d, Thread ID = %d",
			__func__, t, wait, i);
		while (1) {
			c = __atomic_load_n(&cnt->cnt, __ATOMIC_ACQUIRE);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %"PRIu64", wait = %d, Thread QS cnt = %"PRIu64", Thread ID = %d",
				__func__, t, wait, c, i);
			/* Counter is not checked for wrap-around; it is
			 * a 64b counter.
			 */
			if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
				break;

			/* This thread is not in quiescent state */
			if (!wait)
				return 0;

			rte_pause();
		}
	}

	return 1;
}
/**
 * Check if all reader threads have passed through a quiescent state
 * referenced by the given token. Returns 1 if they have, 0 otherwise;
 * with 'wait' set, it blocks instead of returning 0.
 */
static __rte_always_inline int __rte_experimental
rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	RTE_ASSERT(v != NULL);

	if (likely(v->num_threads == v->max_threads))
		return __rte_rcu_qsbr_check_all(v, t, wait);
	else
		return __rte_rcu_qsbr_check_selective(v, t, wait);
}
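/* Writer-side sketch (illustrative; remove_element() and do_other_work()
 * are hypothetical placeholders): unlink the element so no new reader can
 * find it, wait for all readers to go quiescent, then free it:
 *
 *	remove_element(list, elem);
 *	uint64_t t = rte_rcu_qsbr_start(v);
 *	while (rte_rcu_qsbr_check(v, t, false) == 0)
 *		do_other_work();	// or pass wait = true to block
 *	free(elem);			// no reader holds a reference now
 */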
/**
 * Block until all the reader threads have entered the quiescent state.
 * Pass RTE_QSBR_THRID_INVALID as 'thread_id' if the caller is not a
 * registered reader thread.
 */
void __rte_experimental
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);

/**
 * Dump the details of a single QS variable to a file.
 */
int __rte_experimental
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);

#endif /* _RTE_RCU_QSBR_H_ */