DPDK  20.11.10
rte_ring_elem.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ring_core.h>

ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);

struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
		unsigned int count, int socket_id, unsigned int flags);

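/*
 * A minimal usage sketch for the two APIs above, assuming a hypothetical
 * 16-byte event element (the struct and ring name are illustrative, not
 * part of this header):
 *
 *	struct event { uint64_t id; uint64_t data; };
 *
 *	struct rte_ring *r = rte_ring_create_elem("events",
 *			sizeof(struct event), 1024, rte_socket_id(),
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *
 * esize must be a multiple of 4 bytes; rte_ring_get_memsize_elem() returns
 * the memory footprint for callers that reserve the ring memory themselves.
 */
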
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif

static __rte_always_inline void
__rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, const void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	const uint32_t *obj = (const uint32_t *)obj_table;
	if (likely(idx + n <= size)) {
		/* Unrolled copy: 8 elements per iteration. */
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
			ring[idx + 4] = obj[i + 4];
			ring[idx + 5] = obj[i + 5];
			ring[idx + 6] = obj[i + 6];
			ring[idx + 7] = obj[i + 7];
		}
		/* Copy the remaining 0-7 elements. */
		switch (n & 0x7) {
		case 7:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 6:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 5:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 4:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		/* The copy wraps: fill up to the end of the ring... */
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* ...then start again at the beginning. */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
		}
		switch (n & 0x3) {
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	const rte_int128_t *obj = (const rte_int128_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
	}
}

/* The actual enqueue of elements on the ring.
 * Placed here since identical code is needed in both
 * the single and multi producer enqueue functions.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = prod_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
				obj_table, nr_num);
	}
}

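/*
 * A worked example of the normalization above: for esize = 12 each element
 * spans three uint32_t words, so scale = 12 / 4 = 3. Enqueuing num = 4
 * elements at element index idx = 5 in a ring of size 8 becomes a plain
 * 32-bit copy of nr_num = 12 words at word index nr_idx = 15, in a ring of
 * nr_size = 24 words.
 */
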
static __rte_always_inline void
__rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	uint32_t *obj = (uint32_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
			obj[i + 4] = ring[idx + 4];
			obj[i + 5] = ring[idx + 5];
			obj[i + 6] = ring[idx + 6];
			obj[i + 7] = ring[idx + 7];
		}
		switch (n & 0x7) {
		case 7:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 6:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 5:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 4:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
		}
		switch (n & 0x3) {
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	rte_int128_t *obj = (rte_int128_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(obj + i), (void *)(ring + idx), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
	}
}

/* The actual dequeue of elements from the ring.
 * Placed here since identical code is needed in both
 * the single and multi consumer dequeue functions.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = cons_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
				obj_table, nr_num);
	}
}

/* Between loads, the CPU may reorder memory accesses on weakly ordered
 * architectures (PowerPC/Arm). Users have two choices:
 * 1. use an rmb() memory barrier
 * 2. use one-way load-acquire/store-release barriers
 * Which is faster depends on performance test results.
 * By default, the common functions live in rte_ring_generic.h.
 */
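/*
 * A minimal sketch of the two choices above, using a consumer-side load of
 * the producer tail as the example (illustrative, not code from this file):
 *
 *	prod_tail = r->prod.tail;
 *	rte_smp_rmb();		// choice 1: explicit read barrier
 *
 *	prod_tail = __atomic_load_n(&r->prod.tail, __ATOMIC_ACQUIRE);
 *				// choice 2: one-way acquire load
 */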
#ifdef RTE_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif

static __rte_always_inline unsigned int
__rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sp,
		unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	__rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);

	update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}

static __rte_always_inline unsigned int
__rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sc,
		unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	__rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);

	update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}

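/*
 * Both helpers above follow the same three-step protocol: reserve a slice
 * of the ring by moving the head, copy the elements, then publish the move
 * by updating the tail so the other side can observe the new entries.
 */
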
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
}

static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
}

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif

static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

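/*
 * A short bulk-enqueue sketch, reusing the hypothetical event struct from
 * the earlier example: bulk calls are all-or-nothing, so a return of 0
 * means nothing was enqueued.
 *
 *	struct event evs[8];
 *
 *	if (rte_ring_enqueue_bulk_elem(r, evs, sizeof(evs[0]),
 *			RTE_DIM(evs), NULL) == 0)
 *		;	// not enough room for all 8 elements
 */
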
static __rte_always_inline int
rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

static __rte_always_inline int
rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

static __rte_always_inline int
rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

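/*
 * A single-element enqueue sketch (names illustrative):
 *
 *	struct event ev = { .id = 1, .data = 42 };
 *
 *	if (rte_ring_enqueue_elem(r, &ev, sizeof(ev)) != 0)
 *		;	// ring full: -ENOBUFS
 */
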
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
}

static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

static __rte_always_inline int
rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

static __rte_always_inline int
rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

static __rte_always_inline int
rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
{
	return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

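/*
 * The matching single-element dequeue sketch (names illustrative):
 *
 *	struct event ev;
 *
 *	if (rte_ring_dequeue_elem(r, &ev, sizeof(ev)) != 0)
 *		;	// ring empty: -ENOENT
 */
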
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
}

static __rte_always_inline unsigned int
rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
}

static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

static __rte_always_inline unsigned int
rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
}

static __rte_always_inline unsigned int
rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

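/*
 * A typical burst-dequeue loop sketch (names illustrative): unlike the
 * bulk calls, a burst returns as many elements as are available, up to n.
 *
 *	struct event burst[32];
 *	unsigned int nb;
 *
 *	do {
 *		nb = rte_ring_dequeue_burst_elem(r, burst,
 *				sizeof(burst[0]), RTE_DIM(burst), NULL);
 *		handle_events(burst, nb);	// hypothetical handler
 *	} while (nb != 0);
 */
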
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000)
#pragma GCC diagnostic pop
#endif

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_peek.h>
#include <rte_ring_peek_zc.h>
#endif

#include <rte_ring.h>

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_ELEM_H_ */