#ifndef _RTE_RING_C11_MEM_H_
#define _RTE_RING_C11_MEM_H_
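
/*
 * Publish a new tail value. With multiple producers/consumers on one side,
 * wait until every preceding update on that side has published its tail;
 * the store-release below pairs with the load-acquire of the opposite
 * tail in the __rte_ring_move_*_head() helpers.
 */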
static __rte_always_inline void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
		uint32_t single, uint32_t enqueue)
{
	RTE_SET_USED(enqueue);

	/* Wait for in-flight updates from preceding enqueues/dequeues */
	if (!single)
		while (unlikely(ht->tail != old_val))
			rte_pause();

	__atomic_store_n(&ht->tail, new_val, __ATOMIC_RELEASE);
}
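
/**
 * @internal Update the producer head for enqueue.
 *
 * Reserves up to n slots by advancing prod.head, retrying the CAS when
 * another producer moves the head first. Returns the number of slots
 * reserved: with RTE_RING_QUEUE_FIXED this is 0 or n, otherwise anything
 * up to n.
 */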
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t capacity = r->capacity;
	unsigned int max = n;
	uint32_t cons_tail;
	int success;

	*old_head = __atomic_load_n(&r->prod.head, __ATOMIC_RELAXED);
	do {
		n = max;	/* reset n to the initial burst count */

		/* Ensure the head is read before the opposing tail */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* Pairs with the store-release of tail in update_tail() */
		cons_tail = __atomic_load_n(&r->cons.tail, __ATOMIC_ACQUIRE);

		/* Unsigned wrap-around keeps the result in [0, capacity] */
		*free_entries = (capacity + cons_tail - *old_head);
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;
		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp)
			r->prod.head = *new_head, success = 1;
		else	/* on failure, the CAS reloads *old_head */
			success = __atomic_compare_exchange_n(&r->prod.head,
					old_head, *new_head, 0,
					__ATOMIC_RELAXED, __ATOMIC_RELAXED);
	} while (unlikely(success == 0));
	return n;
}
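
/**
 * @internal Update the consumer head for dequeue.
 *
 * Mirror image of __rte_ring_move_prod_head(): reserves up to n entries
 * by advancing cons.head and reports the available count via *entries.
 */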
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	uint32_t prod_tail;
	int success;

	*old_head = __atomic_load_n(&r->cons.head, __ATOMIC_RELAXED);
	do {
		n = max;	/* restore n, it may change on every retry */

		/* Ensure the head is read before the opposing tail */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* Pairs with the store-release of tail in update_tail() */
		prod_tail = __atomic_load_n(&r->prod.tail, __ATOMIC_ACQUIRE);

		/* Unsigned wrap-around keeps the result in [0, size - 1] */
		*entries = (prod_tail - *old_head);
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc)
			r->cons.head = *new_head, success = 1;
		else	/* on failure, the CAS reloads *old_head */
			success = __atomic_compare_exchange_n(&r->cons.head,
					old_head, *new_head, 0,
					__ATOMIC_RELAXED, __ATOMIC_RELAXED);
	} while (unlikely(success == 0));
	return n;
}

#endif /* _RTE_RING_C11_MEM_H_ */
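
/*
 * Usage sketch (illustrative only, not part of this header; the real
 * enqueue path in rte_ring.h copies objects with the ENQUEUE_PTRS()
 * macro rather than the plain loop shown here). Callers compose the
 * helpers above as "move the head, copy the objects, publish the tail":
 *
 *	uint32_t head, next, free_entries, i;
 *	void **ring = (void **)&r[1];
 *
 *	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
 *			&head, &next, &free_entries);
 *	if (n != 0) {
 *		for (i = 0; i < n; i++)
 *			ring[(head + i) & r->mask] = obj_table[i];
 *		update_tail(&r->prod, head, next, is_sp, 1);
 *	}
 */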