#ifndef _RTE_RCU_QSBR_H_
#define _RTE_RCU_QSBR_H_

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_ring.h>

extern int rte_rcu_log_type;
#define RTE_LOGTYPE_RCU rte_rcu_log_type

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define __RTE_RCU_DP_LOG(level, ...) \
	RTE_LOG_DP_LINE_PREFIX(level, RCU, "%s(): ", __func__, __VA_ARGS__)
#else
#define __RTE_RCU_DP_LOG(level, ...)
#endif

#if defined(RTE_LIBRTE_RCU_DEBUG)
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, ...) do { \
	if (v->qsbr_cnt[thread_id].lock_cnt) \
		RTE_LOG_LINE_PREFIX(level, RCU, "%s(): ", __func__, __VA_ARGS__); \
} while (0)
#else
#define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, ...)
#endif

/* Registered thread IDs are stored as a bitmap in an array of 64b elements. */
#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(RTE_ATOMIC(uint64_t)) * 8)
#define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
	RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
		__RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t __rte_atomic *) \
	((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
#define __RTE_QSBR_THRID_INDEX_SHIFT 6
#define __RTE_QSBR_THRID_MASK 0x3f
#define RTE_QSBR_THRID_INVALID 0xffffffff

/* Per-thread quiescent state counter. */
struct __rte_cache_aligned rte_rcu_qsbr_cnt {
	/* Quiescent state counter. Value 0 indicates the thread is offline. */
	RTE_ATOMIC(uint64_t) cnt;
	/* Lock counter, used only when RTE_LIBRTE_RCU_DEBUG is enabled. */
	RTE_ATOMIC(uint32_t) lock_cnt;
};
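/*
 * Worked example (illustrative, not part of the original header): with
 * __RTE_QSBR_THRID_INDEX_SHIFT = 6 and __RTE_QSBR_THRID_MASK = 0x3f, thread
 * ID 70 maps to bitmap array element 70 >> 6 = 1 and to bit 70 & 0x3f = 6
 * within that 64b element. A hypothetical helper showing the mapping:
 *
 *	static inline void
 *	thrid_to_bitmap_pos(unsigned int thread_id, uint32_t *elem, uint32_t *bit)
 *	{
 *		*elem = thread_id >> __RTE_QSBR_THRID_INDEX_SHIFT;
 *		*bit = thread_id & __RTE_QSBR_THRID_MASK;
 *	}
 */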
#define __RTE_QSBR_CNT_THR_OFFLINE 0
#define __RTE_QSBR_CNT_INIT 1
#define __RTE_QSBR_CNT_MAX ((uint64_t)~0)
#define __RTE_QSBR_TOKEN_SIZE sizeof(uint64_t)

/* RTE quiescent state variable structure.
 * Two trailing arrays vary in size with 'max_threads': the per-thread
 * counter array and the registered thread ID bitmap.
 */
struct __rte_cache_aligned rte_rcu_qsbr {
	/* Counter to allow for multiple concurrent quiescent state queries */
	alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint64_t) token;
	/* Least token acked by all threads in the last call to rte_rcu_qsbr_check */
	RTE_ATOMIC(uint64_t) acked_token;

	/* Number of elements in the thread ID bitmap array */
	alignas(RTE_CACHE_LINE_SIZE) uint32_t num_elems;
	/* Number of threads currently using this QS variable */
	RTE_ATOMIC(uint32_t) num_threads;
	/* Maximum number of threads using this QS variable */
	uint32_t max_threads;

	/* Quiescent state counter array of 'max_threads' elements,
	 * followed by the registered thread ID bitmap array.
	 */
	alignas(RTE_CACHE_LINE_SIZE)
	struct rte_rcu_qsbr_cnt qsbr_cnt[];
};
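/*
 * Example (illustrative sketch, not part of the original header): the
 * application allocates the QS variable using the size reported by
 * rte_rcu_qsbr_get_memsize() and then initializes it. rte_zmalloc() from
 * <rte_malloc.h> is just one possible allocator; max_threads = 8 is an
 * arbitrary value chosen for the example:
 *
 *	uint32_t max_threads = 8;
 *	size_t sz = rte_rcu_qsbr_get_memsize(max_threads);
 *	struct rte_rcu_qsbr *v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	if (v == NULL || rte_rcu_qsbr_init(v, max_threads) != 0)
 *		rte_panic("cannot create RCU QS variable\n");
 */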
#define RTE_RCU_QSBR_DQ_NAMESIZE RTE_RING_NAMESIZE

/* Flags for the defer queue APIs. */
#define RTE_RCU_QSBR_DQ_MT_UNSAFE 1

/* Callback invoked by the defer queue to free resources once it is safe
 * to reclaim them.
 */
typedef void (*rte_rcu_qsbr_free_resource_t)(void *p, void *e, unsigned int n);

/* Parameters used when creating the defer queue. */
struct rte_rcu_qsbr_dq_parameters {
	const char *name;		/* Name of the defer queue */
	uint32_t flags;			/* RTE_RCU_QSBR_DQ_* flags */
	uint32_t size;			/* Number of entries in the defer queue */
	uint32_t esize;			/* Size of each entry in bytes, multiple of 4B */
	uint32_t trigger_reclaim_limit;	/* Queue depth that triggers automatic reclamation */
	uint32_t max_reclaim_size;	/* Maximum number of resources reclaimed in one go */
	rte_rcu_qsbr_free_resource_t free_fn;	/* Function called to free resources */
	void *p;			/* Pointer passed through to free_fn */
	struct rte_rcu_qsbr *v;		/* RCU QSBR variable to use for this defer queue */
};

/* RTE defer queue structure.
 * Holds entries deleted from the data structure until it is safe to free them.
 */
struct rte_rcu_qsbr_dq;
static __rte_always_inline void
rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Copy the current value of the token. */
	t = rte_atomic_load_explicit(&v->token, rte_memory_order_relaxed);

	/* The atomic store keeps the 64b counter access atomic. */
	rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
		t, rte_memory_order_relaxed);

	/* Subsequent loads of the shared data structure must not move above
	 * the store of the counter, so a store-load barrier is required.
	 */
	rte_atomic_thread_fence(rte_memory_order_seq_cst);
}
static __rte_always_inline void
rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* The reader can go offline only after its loads of the shared data
	 * structure complete, hence the store-release.
	 */
	rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
		__RTE_QSBR_CNT_THR_OFFLINE, rte_memory_order_release);
}
static __rte_always_inline void
rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Increment the lock counter */
	rte_atomic_fetch_add_explicit(&v->qsbr_cnt[thread_id].lock_cnt,
				1, rte_memory_order_acquire);
#endif
}
static __rte_always_inline void
rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Decrement the lock counter */
	rte_atomic_fetch_sub_explicit(&v->qsbr_cnt[thread_id].lock_cnt,
				1, rte_memory_order_release);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
				"Lock counter %u. Nested locks?",
				v->qsbr_cnt[thread_id].lock_cnt);
#endif
}
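/*
 * Example (illustrative, not part of the original header): in debug builds
 * (RTE_LIBRTE_RCU_DEBUG) a reader can bracket its critical sections so that
 * reporting a quiescent state from inside one is flagged by the lock counter
 * checks above:
 *
 *	rte_rcu_qsbr_lock(v, thread_id);
 *	(read the shared data structure)
 *	rte_rcu_qsbr_unlock(v, thread_id);
 */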
static __rte_always_inline uint64_t
rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
{
	uint64_t t;

	RTE_ASSERT(v != NULL);

	/* Release the changes to the shared data structure.
	 * This store-release ensures that changes to any data structure are
	 * visible to the workers before the token update is visible.
	 */
	t = rte_atomic_fetch_add_explicit(&v->token, 1, rte_memory_order_release) + 1;

	return t;
}
static __rte_always_inline void
rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Acquire the changes to the shared data structure released by
	 * rte_rcu_qsbr_start. Later loads of the data structure must not
	 * move above this load-acquire.
	 */
	t = rte_atomic_load_explicit(&v->token, rte_memory_order_acquire);

	/* Inform the writer that updates are visible to this reader.
	 * Prior loads of the data structure must not move beyond this store,
	 * hence the store-release.
	 */
	if (t != rte_atomic_load_explicit(&v->qsbr_cnt[thread_id].cnt, rte_memory_order_relaxed))
		rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
					 t, rte_memory_order_release);

	__RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %" PRIu64 ", Thread ID = %d",
		__func__, t, thread_id);
}
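/*
 * Example reader-side flow (illustrative sketch, not part of the original
 * header); 'v', 'thread_id', 'running' and do_lookup() are placeholders
 * chosen for the example:
 *
 *	rte_rcu_qsbr_thread_register(v, thread_id);
 *	rte_rcu_qsbr_thread_online(v, thread_id);
 *	while (running) {
 *		do_lookup();
 *		rte_rcu_qsbr_quiescent(v, thread_id);
 *	}
 *	rte_rcu_qsbr_thread_offline(v, thread_id);
 *	rte_rcu_qsbr_thread_unregister(v, thread_id);
 */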
/* Check the quiescent state counters of registered threads only, assuming
 * that not all threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i, j, id;
	uint64_t bmap;
	uint64_t c;
	RTE_ATOMIC(uint64_t) *reg_thread_id;
	uint64_t acked_token = __RTE_QSBR_CNT_MAX;

	for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
		i < v->num_elems;
		i++, reg_thread_id++) {
		/* Load the current registered thread bitmap before loading
		 * the reader thread quiescent state counters.
		 */
		bmap = rte_atomic_load_explicit(reg_thread_id, rte_memory_order_acquire);
		id = i << __RTE_QSBR_THRID_INDEX_SHIFT;

		while (bmap) {
			j = rte_ctz64(bmap);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: check: token = %" PRIu64 ", wait = %d, Bit Map = 0x%" PRIx64 ", Thread ID = %d",
				__func__, t, wait, bmap, id + j);
			c = rte_atomic_load_explicit(
					&v->qsbr_cnt[id + j].cnt,
					rte_memory_order_acquire);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
				__func__, t, wait, c, id + j);

			/* Counter is not checked for wrap-around; it is 64b. */
			if (unlikely(c !=
				__RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
				/* This thread is not in quiescent state */
				if (!wait)
					return 0;

				rte_pause();
				/* This thread might have unregistered.
				 * Re-read the bitmap.
				 */
				bmap = rte_atomic_load_explicit(reg_thread_id,
						rte_memory_order_acquire);

				continue;
			}

			/* This thread is in quiescent state. Use the counter
			 * to find the least acknowledged token among all readers.
			 */
			if (c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c)
				acked_token = c;

			bmap &= ~(1UL << j);
		}
	}

	/* All readers are checked, update the least acknowledged token.
	 * Multiple writers may race here; exact accuracy is not required,
	 * so compare-and-swap is not used.
	 */
	if (acked_token != __RTE_QSBR_CNT_MAX)
		rte_atomic_store_explicit(&v->acked_token, acked_token,
			rte_memory_order_relaxed);

	return 1;
}
/* Check the quiescent state counters of all threads, assuming that all the
 * threads have registered.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i;
	struct rte_rcu_qsbr_cnt *cnt;
	uint64_t c;
	uint64_t acked_token = __RTE_QSBR_CNT_MAX;

	for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %" PRIu64 ", wait = %d, Thread ID = %d",
			__func__, t, wait, i);
		while (1) {
			c = rte_atomic_load_explicit(&cnt->cnt, rte_memory_order_acquire);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
				__func__, t, wait, c, i);

			/* Counter is not checked for wrap-around; it is 64b. */
			if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
				break;

			/* This thread is not in quiescent state */
			if (!wait)
				return 0;

			rte_pause();
		}

		/* This thread is in quiescent state. Use the counter to find
		 * the least acknowledged token among all readers.
		 */
		if (likely(c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c))
			acked_token = c;
	}

	/* All readers are checked, update the least acknowledged token. */
	if (acked_token != __RTE_QSBR_CNT_MAX)
		rte_atomic_store_explicit(&v->acked_token, acked_token,
			rte_memory_order_relaxed);

	return 1;
}
static __rte_always_inline int
rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint64_t acked_token;

	RTE_ASSERT(v != NULL);

	/* Check if all the readers have already acknowledged this token */
	acked_token = rte_atomic_load_explicit(&v->acked_token,
		rte_memory_order_relaxed);
	if (likely(t <= acked_token)) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %" PRIu64 ", wait = %d",
			__func__, t, wait);
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: status: least acked token = %" PRIu64,
			__func__, acked_token);
		return 1;
	}

	if (likely(v->num_threads == v->max_threads))
		return __rte_rcu_qsbr_check_all(v, t, wait);
	else
		return __rte_rcu_qsbr_check_selective(v, t, wait);
}
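/*
 * Example writer-side flow (illustrative sketch, not part of the original
 * header); remove_entry() and free_entry() are placeholders for the
 * application's own delete and free steps:
 *
 *	remove_entry(ds, key);
 *	uint64_t t = rte_rcu_qsbr_start(v);
 *	if (rte_rcu_qsbr_check(v, t, true))
 *		free_entry(entry);
 *
 * Alternatively, rte_rcu_qsbr_synchronize(v, RTE_QSBR_THRID_INVALID) can be
 * called by an unregistered writer to block until all registered readers
 * have acknowledged the update.
 */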
struct rte_rcu_qsbr_dq *
rte_rcu_qsbr_dq_create(const struct rte_rcu_qsbr_dq_parameters *params);

int
rte_rcu_qsbr_dq_enqueue(struct rte_rcu_qsbr_dq *dq, void *e);

int
rte_rcu_qsbr_dq_reclaim(struct rte_rcu_qsbr_dq *dq, unsigned int n,
	unsigned int *freed, unsigned int *pending, unsigned int *available);

int
rte_rcu_qsbr_dq_delete(struct rte_rcu_qsbr_dq *dq);
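/*
 * Example defer queue usage (illustrative sketch, not part of the original
 * header); free_resource(), 'ds', 'v', 'entry_ref' and the sizes are
 * placeholders chosen for the example:
 *
 *	static void free_resource(void *p, void *e, unsigned int n) { ... }
 *
 *	struct rte_rcu_qsbr_dq_parameters params = {
 *		.name = "example_dq",
 *		.size = 1024,
 *		.esize = 8,
 *		.trigger_reclaim_limit = 64,
 *		.max_reclaim_size = 32,
 *		.free_fn = free_resource,
 *		.p = ds,
 *		.v = v,
 *	};
 *	struct rte_rcu_qsbr_dq *dq = rte_rcu_qsbr_dq_create(&params);
 *
 *	unsigned int freed, pending, available;
 *	rte_rcu_qsbr_dq_enqueue(dq, &entry_ref);
 *	rte_rcu_qsbr_dq_reclaim(dq, 32, &freed, &pending, &available);
 *	rte_rcu_qsbr_dq_delete(dq);
 */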
size_t
rte_rcu_qsbr_get_memsize(uint32_t max_threads);

int
rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);

int
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);

int
rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);

void
rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);

int
rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);

#endif /* _RTE_RCU_QSBR_H_ */