#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
		unsigned int count, int socket_id, unsigned int flags);
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif

static __rte_always_inline void
__rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, const void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	const uint32_t *obj = (const uint32_t *)obj_table;
	if (likely(idx + n <= size)) {
		/* unrolled copy of 8 ring slots per iteration */
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
			ring[idx + 4] = obj[i + 4];
			ring[idx + 5] = obj[i + 5];
			ring[idx + 6] = obj[i + 6];
			ring[idx + 7] = obj[i + 7];
		}
		/* copy the remaining 0..7 elements, falling through each case */
		switch (n & 0x7) {
		case 7:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 6:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 5:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 4:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		/* wrap-around: copy up to the end of the ring ... */
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* ... then start again at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}
static __rte_always_inline void
__rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	/* the application's table is not guaranteed to be 8B aligned */
	const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
		}
		/* copy the remaining 0..3 elements, falling through each case */
		switch (n & 0x3) {
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		/* wrap-around: copy up to the end of the ring ... */
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* ... then start again at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}
static __rte_always_inline void
__rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	const rte_int128_t *obj = (const rte_int128_t *)obj_table;
	if (likely(idx + n <= size)) {
		/* copy two 16B elements (32 bytes) per iteration */
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		}
	} else {
		/* wrap-around: copy up to the end of the ring ... */
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		/* ... then start again at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
	}
}
/* The actual enqueue of elements on the ring.
 * Placed here since identical code is needed in both
 * single and multi producer enqueue functions.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = prod_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
			obj_table, nr_num);
	}
}
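/* Illustrative note (not from the original header): for any other esize,
 * which must be a multiple of 4 bytes, the copy is normalized to 32-bit
 * units. E.g. a hypothetical 24-byte element gives scale = 24 / 4 = 6, so
 * enqueuing num = 10 elements copies nr_num = 60 uint32_t slots starting
 * at nr_idx = idx * 6 in a ring of nr_size = size * 6 slots.
 */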
static __rte_always_inline void
__rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	uint32_t *obj = (uint32_t *)obj_table;
	if (likely(idx + n <= size)) {
		/* unrolled copy of 8 ring slots per iteration */
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
			obj[i + 4] = ring[idx + 4];
			obj[i + 5] = ring[idx + 5];
			obj[i + 6] = ring[idx + 6];
			obj[i + 7] = ring[idx + 7];
		}
		/* copy the remaining 0..7 elements, falling through each case */
		switch (n & 0x7) {
		case 7:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 6:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 5:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 4:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		/* wrap-around: copy up to the end of the ring ... */
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* ... then start again at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}
static __rte_always_inline void
__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	/* the application's table is not guaranteed to be 8B aligned */
	unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
		}
		/* copy the remaining 0..3 elements, falling through each case */
		switch (n & 0x3) {
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		/* wrap-around: copy up to the end of the ring ... */
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* ... then start again at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}
static __rte_always_inline void
__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	rte_int128_t *obj = (rte_int128_t *)obj_table;
	if (likely(idx + n <= size)) {
		/* copy two 16B elements (32 bytes) per iteration */
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(obj + i), (void *)(ring + idx), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		}
	} else {
		/* wrap-around: copy up to the end of the ring ... */
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		/* ... then start again at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
	}
}
/* The actual dequeue of elements from the ring.
 * Placed here since identical code is needed in both
 * single and multi consumer dequeue functions.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = cons_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
			obj_table, nr_num);
	}
}
#ifdef RTE_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif

static __rte_always_inline unsigned int
__rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sp,
		unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	__rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);

	update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}
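/*
 * Both the enqueue and the dequeue path follow the same three steps:
 * reserve slots by moving the producer/consumer head, copy the elements,
 * then publish the operation by updating the tail. With
 * RTE_RING_QUEUE_FIXED the requested n is either granted in full or
 * reduced to 0; with RTE_RING_QUEUE_VARIABLE it may be trimmed to the
 * number of free slots (enqueue) or available entries (dequeue).
 */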
static __rte_always_inline unsigned int
__rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sc,
		unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	__rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);

	update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
}
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
}
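/*
 * Rough usage sketch (illustrative only, not part of this header): a
 * producer enqueuing 16-byte records in bulk. The struct and ring names
 * are made up for the example; flags = 0 selects the default
 * multi-producer/multi-consumer ring.
 *
 *	struct rec { uint64_t key; uint64_t val; };	// esize = 16
 *	struct rte_ring *ring = rte_ring_create_elem("recs",
 *			sizeof(struct rec), 1024, rte_socket_id(), 0);
 *	struct rec burst[32];
 *
 *	if (rte_ring_mp_enqueue_bulk_elem(ring, burst, sizeof(struct rec),
 *			RTE_DIM(burst), NULL) == 0) {
 *		// ring full: with bulk (fixed) semantics nothing was enqueued
 *	}
 */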
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif

static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n, free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n, free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n, free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n, free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
}
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
}
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n, available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n, available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize, n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize, n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}
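/*
 * Rough usage sketch (illustrative only, not part of this header): the
 * matching consumer for the enqueue example above, letting the ring's
 * configured sync type pick the dequeue flavour. The names are made up.
 *
 *	struct rec out[32];
 *	unsigned int got;
 *
 *	got = rte_ring_dequeue_bulk_elem(ring, out, sizeof(struct rec),
 *			RTE_DIM(out), NULL);
 *	// got is either RTE_DIM(out) or 0: bulk dequeue is all-or-nothing
 */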
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
}
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
}
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n, free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n, free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize, n, free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize, n, free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
}
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
}
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n, available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n, available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize, n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize, n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}
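/*
 * Illustrative note: unlike the *_bulk_* calls above, the *_burst_* calls
 * use RTE_RING_QUEUE_VARIABLE, so a consumer can drain whatever is
 * currently available, e.g. (made-up names):
 *
 *	unsigned int got = rte_ring_dequeue_burst_elem(ring, out,
 *			sizeof(struct rec), RTE_DIM(out), NULL);
 *	// got may be anywhere between 0 and RTE_DIM(out)
 */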
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
#pragma GCC diagnostic pop
#endif

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_peek.h>
#include <rte_ring_peek_zc.h>
#endif
/*
 * Flattened index of the element-aware API referenced in this header.
 * The bulk/burst variants share the signature
 * (struct rte_ring *r, [const] void *obj_table, unsigned int esize,
 *  unsigned int n, unsigned int *free_space / *available) and return
 * unsigned int; the single-element variants take
 * (struct rte_ring *r, void *obj, unsigned int esize) and return int;
 * the RTS/HTS variants are __rte_experimental.
 *
 * ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
 * struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
 *	unsigned int count, int socket_id, unsigned int flags);
 *
 * Bulk enqueue:
 *	rte_ring_mp_enqueue_bulk_elem, rte_ring_sp_enqueue_bulk_elem,
 *	rte_ring_mp_rts_enqueue_bulk_elem, rte_ring_mp_hts_enqueue_bulk_elem,
 *	rte_ring_enqueue_bulk_elem
 * Single enqueue:
 *	rte_ring_mp_enqueue_elem, rte_ring_sp_enqueue_elem, rte_ring_enqueue_elem
 * Burst enqueue:
 *	rte_ring_mp_enqueue_burst_elem, rte_ring_sp_enqueue_burst_elem,
 *	rte_ring_mp_rts_enqueue_burst_elem, rte_ring_mp_hts_enqueue_burst_elem,
 *	rte_ring_enqueue_burst_elem
 * Bulk dequeue:
 *	rte_ring_mc_dequeue_bulk_elem, rte_ring_sc_dequeue_bulk_elem,
 *	rte_ring_mc_rts_dequeue_bulk_elem, rte_ring_mc_hts_dequeue_bulk_elem,
 *	rte_ring_dequeue_bulk_elem
 * Single dequeue:
 *	rte_ring_mc_dequeue_elem, rte_ring_sc_dequeue_elem, rte_ring_dequeue_elem
 * Burst dequeue:
 *	rte_ring_mc_dequeue_burst_elem, rte_ring_sc_dequeue_burst_elem,
 *	rte_ring_mc_rts_dequeue_burst_elem, rte_ring_mc_hts_dequeue_burst_elem,
 *	rte_ring_dequeue_burst_elem
 */

#endif /* _RTE_RING_ELEM_H_ */