DPDK 25.03.0-rc0
rte_ring_generic_pvt.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_GENERIC_PVT_H_
#define _RTE_RING_GENERIC_PVT_H_

static __rte_always_inline void
__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
		uint32_t new_val, uint32_t single, uint32_t enqueue)
{
	if (enqueue)
		rte_smp_wmb();
	else
		rte_smp_rmb();
	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete
	 */
	if (!single)
		rte_wait_until_equal_32((volatile uint32_t *)(uintptr_t)&ht->tail, old_val,
			rte_memory_order_relaxed);

	ht->tail = new_val;
}

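The tail move above is the publication step of every enqueue and dequeue. As an
illustration (the helper below is hypothetical, not part of this header), a
multi-producer enqueue would finish like this once its objects are copied into
the reserved slots:

/*
 * Illustrative sketch: each producer reserved [old_head, new_head) earlier
 * with a CAS on prod.head. __rte_ring_update_tail() first spins until
 * prod.tail equals old_head (i.e. all earlier producers have published),
 * then advances the tail, making the new objects visible to consumers.
 */
static __rte_always_inline void
example_enqueue_finish(struct rte_ring *r, uint32_t old_head,
		uint32_t new_head, unsigned int is_sp)
{
	/* ... objects were copied into the ring before this point ... */
	__rte_ring_update_tail(&r->prod, old_head, new_head, is_sp, 1);
}
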
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t capacity = r->capacity;
	unsigned int max = n;
	int success;

	do {
		/* Reset n to the initial burst count */
		n = max;

		*old_head = r->prod.head;

		/* add rmb barrier to avoid load/load reorder in weak
		 * memory model. It is noop on x86
		 */
		rte_smp_rmb();

		/*
		 * The subtraction is done between two unsigned 32bits value
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > cons_tail). So 'free_entries' is always between 0
		 * and capacity (which is < size).
		 */
		*free_entries = (capacity + r->cons.tail - *old_head);

		/* check that we have enough room in ring */
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp) {
			r->prod.head = *new_head;
			success = 1;
		} else
			success = rte_atomic32_cmpset((uint32_t *)(uintptr_t)&r->prod.head,
					*old_head, *new_head);
	} while (unlikely(success == 0));
	return n;
}

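The modulo-2^32 argument in the comment above can be checked with concrete
numbers; a standalone illustration with assumed values (not part of DPDK):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t capacity = 8;
	uint32_t cons_tail = 0xFFFFFFFEu; /* consumer tail just before wrap */
	uint32_t prod_head = 0x00000001u; /* producer head already wrapped */

	/* Same expression as in __rte_ring_move_prod_head(): unsigned
	 * arithmetic is modulo 2^32, so the index wraparound cancels out.
	 * Used slots: prod_head - cons_tail = 3, hence 8 - 3 = 5 free.
	 */
	uint32_t free_entries = capacity + cons_tail - prod_head;
	assert(free_entries == 5);
	return 0;
}
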
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	int success;

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = max;

		*old_head = r->cons.head;

		/* add rmb barrier to avoid load/load reorder in weak
		 * memory model. It is noop on x86
		 */
		rte_smp_rmb();

		/* The subtraction is done between two unsigned 32bits value
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1.
		 */
		*entries = (r->prod.tail - *old_head);

		/* Set the actual entries for dequeue */
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc) {
			r->cons.head = *new_head;
			rte_smp_rmb();
			success = 1;
		} else {
			success = rte_atomic32_cmpset((uint32_t *)(uintptr_t)&r->cons.head,
					*old_head, *new_head);
		}
	} while (unlikely(success == 0));
	return n;
}

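Putting the helpers together, a hedged sketch (example_dequeue is hypothetical
and the copy step is elided) of the three-phase dequeue pattern that the
element-copy paths in rte_ring_elem_pvt.h build on:

static __rte_always_inline unsigned int
example_dequeue(struct rte_ring *r, void **obj_table, unsigned int n,
		unsigned int is_sc)
{
	uint32_t old_head, new_head, entries;

	/* Phase 1: reserve n entries by moving cons.head (CAS when MC). */
	n = __rte_ring_move_cons_head(r, is_sc, n, RTE_RING_QUEUE_FIXED,
			&old_head, &new_head, &entries);
	if (n == 0)
		return 0;

	/* Phase 2: copy n objects out of the ring, starting at slot
	 * old_head & r->mask (elided in this sketch). */
	(void)obj_table;

	/* Phase 3: publish by moving cons.tail; when is_sc == 0 this
	 * waits for earlier consumers to publish their slots first. */
	__rte_ring_update_tail(&r->cons, old_head, new_head, is_sc, 0);
	return n;
}
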
#endif /* _RTE_RING_GENERIC_PVT_H_ */