#ifndef _RTE_RING_C11_PVT_H_
#define _RTE_RING_C11_PVT_H_

static __rte_always_inline void
__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
		uint32_t new_val, uint32_t single, uint32_t enqueue)
{
	RTE_SET_USED(enqueue);

	/* If other enqueues/dequeues preceded us, wait until they have
	 * published their tail update before we publish ours.
	 */
	if (!single)
		rte_wait_until_equal_32((volatile uint32_t *)(uintptr_t)&ht->tail,
				old_val, rte_memory_order_relaxed);

	/* store-release pairs with the load-acquire of the opposite tail in
	 * __rte_ring_move_prod_head()/__rte_ring_move_cons_head() below.
	 */
	rte_atomic_store_explicit(&ht->tail, new_val, rte_memory_order_release);
}
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t capacity = r->capacity;
	uint32_t cons_tail;
	unsigned int max = n;
	int success;

	*old_head = rte_atomic_load_explicit(&r->prod.head, rte_memory_order_relaxed);
	do {
		n = max;	/* restore the requested burst count */

		/* make sure the head is read before the opposing tail */
		rte_atomic_thread_fence(rte_memory_order_acquire);

		/* load-acquire pairs with the store-release of cons.tail
		 * in __rte_ring_update_tail()
		 */
		cons_tail = rte_atomic_load_explicit(&r->cons.tail,
					rte_memory_order_acquire);

		/* unsigned 32-bit arithmetic keeps the result in [0, capacity] */
		*free_entries = (capacity + cons_tail - *old_head);

		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *free_entries;
		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp) {
			r->prod.head = *new_head;	/* single producer: plain store */
			success = 1;
		} else
			/* on failure, *old_head is reloaded with the current head */
			success = rte_atomic_compare_exchange_strong_explicit(
					&r->prod.head, old_head, *new_head,
					rte_memory_order_relaxed,
					rte_memory_order_relaxed);
	} while (unlikely(success == 0));
	return n;
}
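/*
 * Illustrative sketch, not part of the upstream header: how the two helpers
 * above combine into a multi-producer enqueue ("move head, copy, update
 * tail"). The function name example_mp_enqueue and the ring_objs slot array
 * are hypothetical; the real copy step goes through __rte_ring_enqueue_elems().
 */
static inline unsigned int
example_mp_enqueue(struct rte_ring *r, void **ring_objs,
		void * const *obj_table, unsigned int n)
{
	uint32_t old_head, new_head, free_entries;
	uint32_t i, idx;

	/* 1) reserve up to n slots by advancing prod.head (CAS loop inside) */
	n = __rte_ring_move_prod_head(r, 0 /* multi-producer */, n,
			RTE_RING_QUEUE_VARIABLE, &old_head, &new_head,
			&free_entries);
	if (n == 0)
		return 0;

	/* 2) copy the objects into the reserved slots */
	for (i = 0, idx = old_head & r->mask; i < n;
			i++, idx = (idx + 1) & r->mask)
		ring_objs[idx] = obj_table[i];

	/* 3) publish: wait for earlier producers, then release prod.tail */
	__rte_ring_update_tail(&r->prod, old_head, new_head, 0, 1);
	return n;
}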
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	uint32_t prod_tail;
	int success;

	/* move cons.head atomically */
	*old_head = rte_atomic_load_explicit(&r->cons.head, rte_memory_order_relaxed);
	do {
		n = max;	/* restore the requested burst count */

		/* make sure the head is read before the opposing tail */
		rte_atomic_thread_fence(rte_memory_order_acquire);

		/* load-acquire pairs with the store-release of prod.tail
		 * in __rte_ring_update_tail()
		 */
		prod_tail = rte_atomic_load_explicit(&r->prod.tail,
					rte_memory_order_acquire);

		/* unsigned 32-bit arithmetic keeps the result in [0, size) */
		*entries = (prod_tail - *old_head);

		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc) {
			r->cons.head = *new_head;	/* single consumer: plain store */
			success = 1;
		} else
			/* on failure, *old_head is reloaded with the current head */
			success = rte_atomic_compare_exchange_strong_explicit(
					&r->cons.head, old_head, *new_head,
					rte_memory_order_relaxed,
					rte_memory_order_relaxed);
	} while (unlikely(success == 0));
	return n;
}
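/*
 * Illustrative sketch, not part of the upstream header: the public burst API
 * that funnels into the head/tail helpers above. A ring created with flags
 * == 0 is multi-producer/multi-consumer, so the CAS paths shown above are the
 * ones exercised. Names and sizes are arbitrary; the includes belong in
 * application code, not in this private header.
 */
#include <rte_lcore.h>
#include <rte_ring.h>

static void
example_burst_usage(void)
{
	void *objs[32] = { NULL }, *got[32];
	unsigned int sent, recvd, avail, free_space;

	struct rte_ring *r = rte_ring_create("example", 1024, rte_socket_id(), 0);
	if (r == NULL)
		return;

	/* enqueue: __rte_ring_move_prod_head() + copy + __rte_ring_update_tail() */
	sent = rte_ring_enqueue_burst(r, objs, 32, &free_space);

	/* dequeue: __rte_ring_move_cons_head() + copy + __rte_ring_update_tail() */
	recvd = rte_ring_dequeue_burst(r, got, sent, &avail);
	(void)recvd;

	rte_ring_free(r);
}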
/* Helpers used above, declared in rte_common.h, rte_pause.h and rte_atomic.h: */

#define __rte_always_inline inline __attribute__((always_inline))

static __rte_always_inline void
rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
		rte_memory_order memorder);

static inline void
rte_atomic_thread_fence(rte_memory_order memorder);
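/*
 * Illustrative sketch, not the upstream implementation: on generic targets
 * rte_wait_until_equal_32() amounts to polling an atomic load with the
 * requested memory order while relaxing the CPU; arm64 builds can use WFE
 * instead. example_wait_until_equal_32 and the plain int memorder parameter
 * (standing in for rte_memory_order) are assumptions made for this sketch.
 */
static inline void
example_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
		int memorder)
{
	while (__atomic_load_n(addr, memorder) != expected)
		rte_pause();	/* CPU "relax" hint from <rte_pause.h> */
}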