DPDK  20.08.0
rte_ring_elem.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2019 Arm Limited
4  * Copyright (c) 2010-2017 Intel Corporation
5  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
6  * All rights reserved.
7  * Derived from FreeBSD's bufring.h
8  * Used as BSD-3 Licensed with permission from Kip Macy.
9  */
10 
11 #ifndef _RTE_RING_ELEM_H_
12 #define _RTE_RING_ELEM_H_
13 
19 #ifdef __cplusplus
20 extern "C" {
21 #endif
22 
23 #include <rte_ring_core.h>
24 
43 ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
44 
105 struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
106  unsigned int count, int socket_id, unsigned int flags);
107 
108 static __rte_always_inline void
109 __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
110  uint32_t idx, const void *obj_table, uint32_t n)
111 {
112  unsigned int i;
113  uint32_t *ring = (uint32_t *)&r[1];
114  const uint32_t *obj = (const uint32_t *)obj_table;
115  if (likely(idx + n < size)) {
116  for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
117  ring[idx] = obj[i];
118  ring[idx + 1] = obj[i + 1];
119  ring[idx + 2] = obj[i + 2];
120  ring[idx + 3] = obj[i + 3];
121  ring[idx + 4] = obj[i + 4];
122  ring[idx + 5] = obj[i + 5];
123  ring[idx + 6] = obj[i + 6];
124  ring[idx + 7] = obj[i + 7];
125  }
126  switch (n & 0x7) {
127  case 7:
128  ring[idx++] = obj[i++]; /* fallthrough */
129  case 6:
130  ring[idx++] = obj[i++]; /* fallthrough */
131  case 5:
132  ring[idx++] = obj[i++]; /* fallthrough */
133  case 4:
134  ring[idx++] = obj[i++]; /* fallthrough */
135  case 3:
136  ring[idx++] = obj[i++]; /* fallthrough */
137  case 2:
138  ring[idx++] = obj[i++]; /* fallthrough */
139  case 1:
140  ring[idx++] = obj[i++]; /* fallthrough */
141  }
142  } else {
143  for (i = 0; idx < size; i++, idx++)
144  ring[idx] = obj[i];
145  /* Start at the beginning */
146  for (idx = 0; i < n; i++, idx++)
147  ring[idx] = obj[i];
148  }
149 }
150 
151 static __rte_always_inline void
152 __rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
153  const void *obj_table, uint32_t n)
154 {
155  unsigned int i;
156  const uint32_t size = r->size;
157  uint32_t idx = prod_head & r->mask;
158  uint64_t *ring = (uint64_t *)&r[1];
159  const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
160  if (likely(idx + n < size)) {
161  for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
162  ring[idx] = obj[i];
163  ring[idx + 1] = obj[i + 1];
164  ring[idx + 2] = obj[i + 2];
165  ring[idx + 3] = obj[i + 3];
166  }
167  switch (n & 0x3) {
168  case 3:
169  ring[idx++] = obj[i++]; /* fallthrough */
170  case 2:
171  ring[idx++] = obj[i++]; /* fallthrough */
172  case 1:
173  ring[idx++] = obj[i++];
174  }
175  } else {
176  for (i = 0; idx < size; i++, idx++)
177  ring[idx] = obj[i];
178  /* Start at the beginning */
179  for (idx = 0; i < n; i++, idx++)
180  ring[idx] = obj[i];
181  }
182 }
183 
184 static __rte_always_inline void
185 __rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
186  const void *obj_table, uint32_t n)
187 {
188  unsigned int i;
189  const uint32_t size = r->size;
190  uint32_t idx = prod_head & r->mask;
191  rte_int128_t *ring = (rte_int128_t *)&r[1];
192  const rte_int128_t *obj = (const rte_int128_t *)obj_table;
193  if (likely(idx + n < size)) {
194  for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
195  memcpy((void *)(ring + idx),
196  (const void *)(obj + i), 32);
197  switch (n & 0x1) {
198  case 1:
199  memcpy((void *)(ring + idx),
200  (const void *)(obj + i), 16);
201  }
202  } else {
203  for (i = 0; idx < size; i++, idx++)
204  memcpy((void *)(ring + idx),
205  (const void *)(obj + i), 16);
206  /* Start at the beginning */
207  for (idx = 0; i < n; i++, idx++)
208  memcpy((void *)(ring + idx),
209  (const void *)(obj + i), 16);
210  }
211 }
212 
213 /* The actual enqueue of elements on the ring.
214  * Placed here since identical code is needed in both
215  * single and multi producer enqueue functions.
216  */
217 static __rte_always_inline void
218 __rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
219  const void *obj_table, uint32_t esize, uint32_t num)
220 {
221  /* 8B and 16B copies implemented individually to retain
222  * the current performance.
223  */
224  if (esize == 8)
225  __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
226  else if (esize == 16)
227  __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
228  else {
229  uint32_t idx, scale, nr_idx, nr_num, nr_size;
230 
231  /* Normalize to uint32_t */
232  scale = esize / sizeof(uint32_t);
233  nr_num = num * scale;
234  idx = prod_head & r->mask;
235  nr_idx = idx * scale;
236  nr_size = r->size * scale;
237  __rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
238  obj_table, nr_num);
239  }
240 }
241 
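A worked example of the "Normalize to uint32_t" step above, using illustrative numbers that are not taken from the header: with esize = 12 each element is copied as three 32-bit words, so for num = 2 elements starting at idx = 5 in a ring of size = 8 elements:

    scale   = esize / sizeof(uint32_t) = 12 / 4 = 3
    nr_num  = num  * scale = 2 * 3 = 6    /* uint32_t values to copy     */
    nr_idx  = idx  * scale = 5 * 3 = 15   /* first uint32_t slot written */
    nr_size = size * scale = 8 * 3 = 24   /* uint32_t slots in the ring  */

__rte_ring_enqueue_elems_32() then copies plain 32-bit words, which is why the element size has to be a multiple of 4 bytes.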
242 static __rte_always_inline void
243 __rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
244  uint32_t idx, void *obj_table, uint32_t n)
245 {
246  unsigned int i;
247  uint32_t *ring = (uint32_t *)&r[1];
248  uint32_t *obj = (uint32_t *)obj_table;
249  if (likely(idx + n < size)) {
250  for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
251  obj[i] = ring[idx];
252  obj[i + 1] = ring[idx + 1];
253  obj[i + 2] = ring[idx + 2];
254  obj[i + 3] = ring[idx + 3];
255  obj[i + 4] = ring[idx + 4];
256  obj[i + 5] = ring[idx + 5];
257  obj[i + 6] = ring[idx + 6];
258  obj[i + 7] = ring[idx + 7];
259  }
260  switch (n & 0x7) {
261  case 7:
262  obj[i++] = ring[idx++]; /* fallthrough */
263  case 6:
264  obj[i++] = ring[idx++]; /* fallthrough */
265  case 5:
266  obj[i++] = ring[idx++]; /* fallthrough */
267  case 4:
268  obj[i++] = ring[idx++]; /* fallthrough */
269  case 3:
270  obj[i++] = ring[idx++]; /* fallthrough */
271  case 2:
272  obj[i++] = ring[idx++]; /* fallthrough */
273  case 1:
274  obj[i++] = ring[idx++]; /* fallthrough */
275  }
276  } else {
277  for (i = 0; idx < size; i++, idx++)
278  obj[i] = ring[idx];
279  /* Start at the beginning */
280  for (idx = 0; i < n; i++, idx++)
281  obj[i] = ring[idx];
282  }
283 }
284 
285 static __rte_always_inline void
286 __rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head,
287  void *obj_table, uint32_t n)
288 {
289  unsigned int i;
290  const uint32_t size = r->size;
291  uint32_t idx = prod_head & r->mask;
292  uint64_t *ring = (uint64_t *)&r[1];
293  unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
294  if (likely(idx + n < size)) {
295  for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
296  obj[i] = ring[idx];
297  obj[i + 1] = ring[idx + 1];
298  obj[i + 2] = ring[idx + 2];
299  obj[i + 3] = ring[idx + 3];
300  }
301  switch (n & 0x3) {
302  case 3:
303  obj[i++] = ring[idx++]; /* fallthrough */
304  case 2:
305  obj[i++] = ring[idx++]; /* fallthrough */
306  case 1:
307  obj[i++] = ring[idx++]; /* fallthrough */
308  }
309  } else {
310  for (i = 0; idx < size; i++, idx++)
311  obj[i] = ring[idx];
312  /* Start at the beginning */
313  for (idx = 0; i < n; i++, idx++)
314  obj[i] = ring[idx];
315  }
316 }
317 
318 static __rte_always_inline void
319 __rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head,
320  void *obj_table, uint32_t n)
321 {
322  unsigned int i;
323  const uint32_t size = r->size;
324  uint32_t idx = prod_head & r->mask;
325  rte_int128_t *ring = (rte_int128_t *)&r[1];
326  rte_int128_t *obj = (rte_int128_t *)obj_table;
327  if (likely(idx + n < size)) {
328  for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
329  memcpy((void *)(obj + i), (void *)(ring + idx), 32);
330  switch (n & 0x1) {
331  case 1:
332  memcpy((void *)(obj + i), (void *)(ring + idx), 16);
333  }
334  } else {
335  for (i = 0; idx < size; i++, idx++)
336  memcpy((void *)(obj + i), (void *)(ring + idx), 16);
337  /* Start at the beginning */
338  for (idx = 0; i < n; i++, idx++)
339  memcpy((void *)(obj + i), (void *)(ring + idx), 16);
340  }
341 }
342 
343 /* The actual dequeue of elements from the ring.
344  * Placed here since identical code is needed in both
345  * single and multi consumer dequeue functions.
346  */
347 static __rte_always_inline void
348 __rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
349  void *obj_table, uint32_t esize, uint32_t num)
350 {
351  /* 8B and 16B copies implemented individually to retain
352  * the current performance.
353  */
354  if (esize == 8)
355  __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
356  else if (esize == 16)
357  __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
358  else {
359  uint32_t idx, scale, nr_idx, nr_num, nr_size;
360 
361  /* Normalize to uint32_t */
362  scale = esize / sizeof(uint32_t);
363  nr_num = num * scale;
364  idx = cons_head & r->mask;
365  nr_idx = idx * scale;
366  nr_size = r->size * scale;
367  __rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
368  obj_table, nr_num);
369  }
370 }
371 
372 /* Between one load and the next, the CPU may reorder them on weakly
373  * ordered memory models (PowerPC/Arm).
374  * There are 2 choices for the users:
375  * 1. use an rmb() memory barrier
376  * 2. use one-direction load_acquire/store_release barriers, selected by
377  * building with CONFIG_RTE_USE_C11_MEM_MODEL=y
378  * Which one is faster depends on performance test results.
379  * By default, the common functions come from rte_ring_generic.h.
380  */
381 #ifdef RTE_USE_C11_MEM_MODEL
382 #include "rte_ring_c11_mem.h"
383 #else
384 #include "rte_ring_generic.h"
385 #endif
386 
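A conceptual sketch of the two choices described in the comment above; it is not part of this header. rte_smp_rmb() comes from rte_atomic.h and __atomic_load_n() is a compiler builtin; the helper names are hypothetical.

#include <stdint.h>
#include <rte_atomic.h>

/* Option 1: plain load followed by an explicit read barrier. */
static inline uint32_t
load_tail_with_rmb(const volatile uint32_t *tail)
{
	uint32_t t = *tail;

	rte_smp_rmb();
	return t;
}

/* Option 2: one-direction load-acquire, the C11 memory model path. */
static inline uint32_t
load_tail_acquire(const uint32_t *tail)
{
	return __atomic_load_n(tail, __ATOMIC_ACQUIRE);
}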
411 static __rte_always_inline unsigned int
412 __rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
413  unsigned int esize, unsigned int n,
414  enum rte_ring_queue_behavior behavior, unsigned int is_sp,
415  unsigned int *free_space)
416 {
417  uint32_t prod_head, prod_next;
418  uint32_t free_entries;
419 
420  n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
421  &prod_head, &prod_next, &free_entries);
422  if (n == 0)
423  goto end;
424 
425  __rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);
426 
427  update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
428 end:
429  if (free_space != NULL)
430  *free_space = free_entries - n;
431  return n;
432 }
433 
458 static __rte_always_inline unsigned int
459 __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
460  unsigned int esize, unsigned int n,
461  enum rte_ring_queue_behavior behavior, unsigned int is_sc,
462  unsigned int *available)
463 {
464  uint32_t cons_head, cons_next;
465  uint32_t entries;
466 
467  n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
468  &cons_head, &cons_next, &entries);
469  if (n == 0)
470  goto end;
471 
472  __rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);
473 
474  update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
475 
476 end:
477  if (available != NULL)
478  *available = entries - n;
479  return n;
480 }
481 
504 static __rte_always_inline unsigned int
505 rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
506  unsigned int esize, unsigned int n, unsigned int *free_space)
507 {
508  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
509  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
510 }
511 
533 static __rte_always_inline unsigned int
534 rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
535  unsigned int esize, unsigned int n, unsigned int *free_space)
536 {
537  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
538  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
539 }
540 
541 #ifdef ALLOW_EXPERIMENTAL_API
542 #include <rte_ring_hts.h>
543 #include <rte_ring_rts.h>
544 #endif
545 
569 static __rte_always_inline unsigned int
570 rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
571  unsigned int esize, unsigned int n, unsigned int *free_space)
572 {
573  switch (r->prod.sync_type) {
574  case RTE_RING_SYNC_MT:
575  return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
576  free_space);
577  case RTE_RING_SYNC_ST:
578  return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
579  free_space);
580 #ifdef ALLOW_EXPERIMENTAL_API
581  case RTE_RING_SYNC_MT_RTS:
582  return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
583  free_space);
584  case RTE_RING_SYNC_MT_HTS:
585  return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
586  free_space);
587 #endif
588  }
589 
590  /* valid ring should never reach this point */
591  RTE_ASSERT(0);
592  if (free_space != NULL)
593  *free_space = 0;
594  return 0;
595 }
596 
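A minimal usage sketch of the bulk enqueue API above; it is not part of the header and assumes an initialized EAL. The ring name, struct my_elem and the function name are hypothetical; the element size must be a multiple of 4 bytes and the element count a power of two.

#include <string.h>
#include <rte_lcore.h>
#include <rte_ring_elem.h>

struct my_elem {	/* hypothetical 16-byte user element */
	uint64_t addr;
	uint32_t len;
	uint32_t id;
};

static int
example_bulk_enqueue(void)
{
	struct my_elem objs[32];
	unsigned int free_space;
	struct rte_ring *r;

	memset(objs, 0, sizeof(objs));

	/* esize = 16 selects the dedicated 128-bit copy path above. */
	r = rte_ring_create_elem("example_ring", sizeof(struct my_elem),
			1024, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;

	/* Bulk semantics: either all 32 elements are enqueued or none. */
	if (rte_ring_enqueue_bulk_elem(r, objs, sizeof(struct my_elem),
			RTE_DIM(objs), &free_space) == 0)
		return -1;	/* not enough free entries */

	return 0;
}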
615 static __rte_always_inline int
616 rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
617 {
618  return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
619  -ENOBUFS;
620 }
621 
639 static __rte_always_inline int
640 rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
641 {
642  return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
643  -ENOBUFS;
644 }
645 
665 static __rte_always_inline int
666 rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
667 {
668  return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
669  -ENOBUFS;
670 }
671 
694 static __rte_always_inline unsigned int
695 rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
696  unsigned int esize, unsigned int n, unsigned int *available)
697 {
698  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
699  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
700 }
701 
722 static __rte_always_inline unsigned int
723 rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
724  unsigned int esize, unsigned int n, unsigned int *available)
725 {
726  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
727  RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
728 }
729 
753 static __rte_always_inline unsigned int
754 rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
755  unsigned int esize, unsigned int n, unsigned int *available)
756 {
757  switch (r->cons.sync_type) {
758  case RTE_RING_SYNC_MT:
759  return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
760  available);
761  case RTE_RING_SYNC_ST:
762  return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
763  available);
764 #ifdef ALLOW_EXPERIMENTAL_API
765  case RTE_RING_SYNC_MT_RTS:
766  return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
767  n, available);
768  case RTE_RING_SYNC_MT_HTS:
769  return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
770  n, available);
771 #endif
772  }
773 
774  /* valid ring should never reach this point */
775  RTE_ASSERT(0);
776  if (available != NULL)
777  *available = 0;
778  return 0;
779 }
780 
800 static __rte_always_inline int
801 rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
802  unsigned int esize)
803 {
804  return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
805  -ENOENT;
806 }
807 
824 static __rte_always_inline int
825 rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
826  unsigned int esize)
827 {
828  return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
829  -ENOENT;
830 }
831 
852 static __rte_always_inline int
853 rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
854 {
855  return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
856  -ENOENT;
857 }
858 
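A short sketch of the single-element wrappers above (hypothetical names); it assumes the ring was created with an 8-byte element size and shows the documented error codes:

#include <errno.h>
#include <stdint.h>
#include <rte_ring_elem.h>

static int
example_single(struct rte_ring *r)	/* ring created with esize = 8 */
{
	uint64_t in = 42, out = 0;

	/* 0 on success, -ENOBUFS when there is no room in the ring. */
	if (rte_ring_enqueue_elem(r, &in, sizeof(in)) != 0)
		return -1;

	/* 0 on success, -ENOENT when there is no entry to dequeue. */
	if (rte_ring_dequeue_elem(r, &out, sizeof(out)) != 0)
		return -1;

	return 0;
}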
881 static __rte_always_inline unsigned int
882 rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
883  unsigned int esize, unsigned int n, unsigned int *free_space)
884 {
885  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
886  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
887 }
888 
910 static __rte_always_inline unsigned int
911 rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
912  unsigned int esize, unsigned int n, unsigned int *free_space)
913 {
914  return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
915  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
916 }
917 
941 static __rte_always_inline unsigned int
942 rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
943  unsigned int esize, unsigned int n, unsigned int *free_space)
944 {
945  switch (r->prod.sync_type) {
946  case RTE_RING_SYNC_MT:
947  return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
948  free_space);
949  case RTE_RING_SYNC_ST:
950  return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
951  free_space);
952 #ifdef ALLOW_EXPERIMENTAL_API
953  case RTE_RING_SYNC_MT_RTS:
954  return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
955  n, free_space);
956  case RTE_RING_SYNC_MT_HTS:
957  return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
958  n, free_space);
959 #endif
960  }
961 
962  /* valid ring should never reach this point */
963  RTE_ASSERT(0);
964  if (free_space != NULL)
965  *free_space = 0;
966  return 0;
967 }
968 
993 static __rte_always_inline unsigned int
994 rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
995  unsigned int esize, unsigned int n, unsigned int *available)
996 {
997  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
998  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
999 }
1000 
1022 static __rte_always_inline unsigned int
1023 rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1024  unsigned int esize, unsigned int n, unsigned int *available)
1025 {
1026  return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
1027  RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
1028 }
1029 
1053 static __rte_always_inline unsigned int
1054 rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
1055  unsigned int esize, unsigned int n, unsigned int *available)
1056 {
1057  switch (r->cons.sync_type) {
1058  case RTE_RING_SYNC_MT:
1059  return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
1060  available);
1061  case RTE_RING_SYNC_ST:
1062  return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
1063  available);
1064 #ifdef ALLOW_EXPERIMENTAL_API
1065  case RTE_RING_SYNC_MT_RTS:
1066  return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
1067  n, available);
1068  case RTE_RING_SYNC_MT_HTS:
1069  return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
1070  n, available);
1071 #endif
1072  }
1073 
1074  /* valid ring should never reach this point */
1075  RTE_ASSERT(0);
1076  if (available != NULL)
1077  *available = 0;
1078  return 0;
1079 }
1080 
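A sketch of the burst variant above (hypothetical names, ring again assumed to hold 8-byte elements): unlike the bulk calls, a burst call may return fewer elements than requested, so it is typically used in a drain loop.

#include <stdint.h>
#include <rte_ring_elem.h>

static void
example_drain(struct rte_ring *r)	/* ring created with esize = 8 */
{
	uint64_t objs[32];
	unsigned int n;

	do {
		/* Dequeue up to 32 elements; n may be anywhere from 0 to 32. */
		n = rte_ring_dequeue_burst_elem(r, objs, sizeof(objs[0]),
				RTE_DIM(objs), NULL);
		/* ... process the n dequeued elements here ... */
	} while (n > 0);
}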
1081 #ifdef ALLOW_EXPERIMENTAL_API
1082 #include <rte_ring_peek.h>
1083 #endif
1084 
1085 #include <rte_ring.h>
1086 
1087 #ifdef __cplusplus
1088 }
1089 #endif
1090 
1091 #endif /* _RTE_RING_ELEM_H_ */