DPDK 20.05.0
rte_ring_elem.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ring_core.h>

__rte_experimental
ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);

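/*
 * A minimal usage sketch for sizing a ring of 1024 16-byte elements
 * (esize must be a multiple of 4 and count a power of two; on invalid
 * input the call returns a negative errno). rte_zmalloc() comes from
 * <rte_malloc.h>:
 *
 *	ssize_t sz = rte_ring_get_memsize_elem(16, 1024);
 *	if (sz >= 0)
 *		ring_mem = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 */
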
__rte_experimental
struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
		unsigned int count, int socket_id, unsigned int flags);

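/*
 * A minimal creation sketch: a ring of 1024 16-byte elements on the
 * caller's NUMA socket. The name and flags are illustrative; with
 * RING_F_SP_ENQ | RING_F_SC_DEQ the ring is single-producer,
 * single-consumer.
 *
 *	struct rte_ring *r = rte_ring_create_elem("elem_ring", 16, 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	if (r == NULL)
 *		rte_panic("cannot create ring\n");
 */
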
static __rte_always_inline void
__rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, const void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	const uint32_t *obj = (const uint32_t *)obj_table;
	if (likely(idx + n < size)) {
		/* Fast path, no wrap-around: copy eight 32-bit slots per
		 * iteration, then finish the remainder (n % 8) with a
		 * fall-through switch.
		 */
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
			ring[idx + 4] = obj[i + 4];
			ring[idx + 5] = obj[i + 5];
			ring[idx + 6] = obj[i + 6];
			ring[idx + 7] = obj[i + 7];
		}
		switch (n & 0x7) {
		case 7:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 6:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 5:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 4:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		/* The copy wraps past the end of the ring: fill up to the
		 * end, then continue from slot 0.
		 */
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	/* The application buffer may not be 8-byte aligned. */
	const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
		}
		switch (n & 0x3) {
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	const rte_int128_t *obj = (const rte_int128_t *)obj_table;
	if (likely(idx + n < size)) {
		/* Copy two 16-byte elements (32 bytes) per iteration. */
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
	}
}

/* The actual enqueue of elements on the ring.
 * Placed here since identical code is needed in both
 * the single and multi producer enqueue functions.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t: treat the ring as an array of
		 * 32-bit words and copy esize/4 words per element.
		 */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = prod_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
				obj_table, nr_num);
	}
}

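/*
 * A worked example of the normalization above, assuming esize = 12, a
 * ring of size 256, and a prod_head giving idx = 100: scale = 12/4 = 3,
 * so num elements become nr_num = num * 3 32-bit words, copied starting
 * at word nr_idx = 300 of a word array of nr_size = 768. Wrap-around
 * then lands on the same element boundary as in the original ring.
 */
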
static __rte_always_inline void
__rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	uint32_t *obj = (uint32_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
			obj[i + 4] = ring[idx + 4];
			obj[i + 5] = ring[idx + 5];
			obj[i + 6] = ring[idx + 6];
			obj[i + 7] = ring[idx + 7];
		}
		switch (n & 0x7) {
		case 7:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 6:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 5:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 4:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
		}
		switch (n & 0x3) {
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	rte_int128_t *obj = (rte_int128_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(obj + i), (void *)(ring + idx), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
	}
}

/* The actual dequeue of elements from the ring.
 * Placed here since identical code is needed in both
 * the single and multi consumer dequeue functions.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = cons_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
				obj_table, nr_num);
	}
}

/* Between two loads there may be CPU reordering on weakly ordered
 * architectures (PowerPC/Arm).
 * Users have two choices:
 * 1. use an rmb() memory barrier;
 * 2. use one-direction load_acquire/store_release barriers, selected by
 *    CONFIG_RTE_USE_C11_MEM_MODEL=y.
 * Which performs better depends on the platform, so the choice is left
 * to configuration; by default the common functions come from
 * rte_ring_generic.h.
 */
#ifdef RTE_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif

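/*
 * A minimal sketch of choice 2 above, using the GCC/Clang atomic
 * builtins that rte_ring_c11_mem.h is built on (variable names are
 * illustrative): the consumer's load-acquire of the producer tail pairs
 * with the producer's store-release, so ring slots written before the
 * release are guaranteed visible after the acquire, without a full
 * barrier.
 *
 *	uint32_t prod_tail = __atomic_load_n(&r->prod.tail,
 *			__ATOMIC_ACQUIRE);
 *	... read ring entries published up to prod_tail ...
 *	__atomic_store_n(&r->cons.tail, new_cons_tail, __ATOMIC_RELEASE);
 */
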
static __rte_always_inline unsigned int
__rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sp,
		unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	__rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);

	update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}

static __rte_always_inline unsigned int
__rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sc,
		unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	__rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);

	update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}

static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
}

static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
}

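/*
 * A bulk-enqueue sketch: all n elements are enqueued or none are. The
 * element type and count are illustrative; esize must match the value
 * used when the ring was created.
 *
 *	struct event { uint64_t id, payload; } burst[32];
 *	unsigned int free_space;
 *	if (rte_ring_sp_enqueue_bulk_elem(r, burst, sizeof(burst[0]), 32,
 *			&free_space) == 0)
 *		... ring full: nothing enqueued, free_space < 32 ...
 */
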
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif

static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

static __rte_always_inline int
rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

static __rte_always_inline int
rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

static __rte_always_inline int
rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

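/*
 * A single-element enqueue sketch: returns 0 on success or -ENOBUFS
 * when the ring is full. The object is copied into the ring, so a
 * stack variable is fine.
 *
 *	struct event ev = { .id = 1, .payload = 42 };
 *	if (rte_ring_enqueue_elem(r, &ev, sizeof(ev)) != 0)
 *		... ring full, ev was not enqueued ...
 */
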
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
}

static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

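/*
 * A bulk-dequeue sketch: all n requested elements are dequeued or none
 * are; on failure, available reports how many could have been taken.
 *
 *	struct event out[32];
 *	unsigned int avail;
 *	if (rte_ring_dequeue_bulk_elem(r, out, sizeof(out[0]), 32,
 *			&avail) == 0)
 *		... fewer than 32 elements present (avail of them) ...
 */
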
static __rte_always_inline int
rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

static __rte_always_inline int
rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

static __rte_always_inline int
rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
{
	return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

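/*
 * A single-element dequeue sketch: returns 0 and fills *obj_p on
 * success, or -ENOENT when the ring is empty.
 *
 *	struct event ev;
 *	if (rte_ring_dequeue_elem(r, &ev, sizeof(ev)) == 0)
 *		... process ev ...
 */
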
static __rte_always_inline unsigned
rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
}

static __rte_always_inline unsigned
rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
}

static __rte_always_inline unsigned
rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

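/*
 * A burst-enqueue sketch: unlike bulk, a burst enqueues as many
 * elements as fit and returns that count, so a producer can retry the
 * remainder (a real producer would typically back off rather than
 * spin).
 *
 *	struct event burst[32];
 *	unsigned int sent = 0;
 *	while (sent < 32)
 *		sent += rte_ring_enqueue_burst_elem(r, &burst[sent],
 *				sizeof(burst[0]), 32 - sent, NULL);
 */
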
static __rte_always_inline unsigned
rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
}

static __rte_always_inline unsigned
rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

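/*
 * A burst-dequeue sketch: drain up to 32 elements per call and process
 * whatever arrived; a return of 0 simply means the ring was empty.
 * process() stands in for application logic.
 *
 *	struct event out[32];
 *	unsigned int nb = rte_ring_dequeue_burst_elem(r, out,
 *			sizeof(out[0]), RTE_DIM(out), NULL);
 *	for (unsigned int i = 0; i < nb; i++)
 *		process(&out[i]);
 */
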
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_peek.h>
#endif

#include <rte_ring.h>

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_ELEM_H_ */