DPDK  16.04.0
rte_ring.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Derived from FreeBSD's bufring.h
 *
 **************************************************************************
 *
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. The name of Kip Macy nor the names of other
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/

#ifndef _RTE_RING_H_
#define _RTE_RING_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdint.h>
#include <sys/queue.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>

#define RTE_TAILQ_RING_NAME "RTE_RING"

enum rte_ring_queue_behavior {
    RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
    RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};

#ifdef RTE_LIBRTE_RING_DEBUG

/* A structure that stores the ring statistics (per-lcore). */
struct rte_ring_debug_stats {
    uint64_t enq_success_bulk; /* Successful enqueue calls. */
    uint64_t enq_success_objs; /* Objects successfully enqueued. */
    uint64_t enq_quota_bulk;   /* Successful enqueues above the watermark. */
    uint64_t enq_quota_objs;   /* Objects enqueued above the watermark. */
    uint64_t enq_fail_bulk;    /* Failed enqueue calls. */
    uint64_t enq_fail_objs;    /* Objects of failed enqueues. */
    uint64_t deq_success_bulk; /* Successful dequeue calls. */
    uint64_t deq_success_objs; /* Objects successfully dequeued. */
    uint64_t deq_fail_bulk;    /* Failed dequeue calls. */
    uint64_t deq_fail_objs;    /* Objects of failed dequeues. */
} __rte_cache_aligned;
#endif

#define RTE_RING_NAMESIZE 32     /* Maximum length of a ring name. */
#define RTE_RING_MZ_PREFIX "RG_" /* Prefix of the memzone backing a ring. */

#ifndef RTE_RING_PAUSE_REP_COUNT
/* Yield after this many pause repetitions; 0 means never yield. */
#define RTE_RING_PAUSE_REP_COUNT 0
#endif

struct rte_memzone; /* forward declaration, so as not to require memzone.h */

/*
 * An RTE ring structure: a fixed-size FIFO of void * pointers, managed with
 * separate, cache-aligned producer and consumer head/tail indexes rather
 * than locks.
 */
struct rte_ring {
    char name[RTE_RING_NAMESIZE];      /* Name of the ring. */
    int flags;                         /* Flags supplied at creation. */
    const struct rte_memzone *memzone; /* Memzone, if any, containing the ring. */

    /* Ring producer status. */
    struct prod {
        uint32_t watermark;      /* Maximum items before EDQUOT. */
        uint32_t sp_enqueue;     /* True, if single producer. */
        uint32_t size;           /* Size of ring. */
        uint32_t mask;           /* Mask (size - 1) of ring. */
        volatile uint32_t head;  /* Producer head. */
        volatile uint32_t tail;  /* Producer tail. */
    } prod __rte_cache_aligned;

    /* Ring consumer status. */
    struct cons {
        uint32_t sc_dequeue;     /* True, if single consumer. */
        uint32_t size;           /* Size of the ring. */
        uint32_t mask;           /* Mask (size - 1) of ring. */
        volatile uint32_t head;  /* Consumer head. */
        volatile uint32_t tail;  /* Consumer tail. */
#ifdef RTE_RING_SPLIT_PROD_CONS
    } cons __rte_cache_aligned;
#else
    } cons;
#endif

#ifdef RTE_LIBRTE_RING_DEBUG
    struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
#endif

    /* Memory space of the ring starts here; not volatile, so be careful
     * about compiler reordering. */
    void *ring[0] __rte_cache_aligned;
};

#define RING_F_SP_ENQ 0x0001 /* The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /* The default dequeue is "single-consumer". */
#define RTE_RING_QUOT_EXCEED (1 << 31)          /* Quota exceeded for burst ops. */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /* Ring size mask. */

#ifdef RTE_LIBRTE_RING_DEBUG
#define __RING_STAT_ADD(r, name, n) do { \
        unsigned __lcore_id = rte_lcore_id(); \
        if (__lcore_id < RTE_MAX_LCORE) { \
            r->stats[__lcore_id].name##_objs += n; \
            r->stats[__lcore_id].name##_bulk += 1; \
        } \
    } while (0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while (0)
#endif

/* Calculate the memory size needed for a ring holding count slots (count must
 * be a power of two); returns the size in bytes, or a negative errno. */
ssize_t rte_ring_get_memsize(unsigned count);

/* Initialize a ring in caller-provided memory of at least
 * rte_ring_get_memsize(count) bytes; returns 0 on success. */
int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
    unsigned flags);

/* Allocate and initialize a new ring named *name* in a memzone on the given
 * NUMA socket; returns NULL on error. The usable capacity is count - 1. */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
    int socket_id, unsigned flags);

/* Free a ring previously created with rte_ring_create(). */
void rte_ring_free(struct rte_ring *r);

/* Set the high watermark: enqueues that leave more than count used entries
 * still succeed but report EDQUOT; a count of 0 disables the watermark.
 * Returns 0 on success, -EINVAL if count is not below the ring size. */
int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);

/* Dump the status of the ring to a file. */
void rte_ring_dump(FILE *f, const struct rte_ring *r);

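/*
 * A minimal creation sketch (illustrative only; example_ring_create(), the
 * ring name "example_ring", the size of 1024 and the watermark of 800 are
 * assumptions, not part of this header). The count passed to
 * rte_ring_create() must be a power of two, and the ring holds at most
 * count - 1 objects.
 */
static inline struct rte_ring *
example_ring_create(void)
{
    struct rte_ring *r;

    /* Single-producer/single-consumer ring on any NUMA socket. */
    r = rte_ring_create("example_ring", 1024, SOCKET_ID_ANY,
        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (r == NULL)
        return NULL;

    /* Optional: make enqueues report EDQUOT once more than 800 entries are used. */
    if (rte_ring_set_water_mark(r, 800) != 0) {
        rte_ring_free(r);
        return NULL;
    }
    return r;
}
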
/* The actual enqueue of pointers on the ring.
 * Placed here since identical code is needed in both
 * the single and multi producer enqueue functions. */
#define ENQUEUE_PTRS() do { \
    const uint32_t size = r->prod.size; \
    uint32_t idx = prod_head & mask; \
    if (likely(idx + n < size)) { \
        for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
            r->ring[idx] = obj_table[i]; \
            r->ring[idx + 1] = obj_table[i + 1]; \
            r->ring[idx + 2] = obj_table[i + 2]; \
            r->ring[idx + 3] = obj_table[i + 3]; \
        } \
        switch (n & 0x3) { \
        case 3: r->ring[idx++] = obj_table[i++]; \
        case 2: r->ring[idx++] = obj_table[i++]; \
        case 1: r->ring[idx++] = obj_table[i++]; \
        } \
    } else { \
        for (i = 0; idx < size; i++, idx++) \
            r->ring[idx] = obj_table[i]; \
        for (idx = 0; i < n; i++, idx++) \
            r->ring[idx] = obj_table[i]; \
    } \
} while (0)
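
/*
 * Worked example of the index arithmetic above (illustrative): with
 * r->prod.size == 8 (mask == 7), prod_head == 6 and n == 4, idx starts at 6,
 * so idx + n is not less than size and the "else" branch runs: slots 6 and 7
 * are filled first, then idx wraps to 0 and slots 0 and 1 take the remaining
 * two objects.
 */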

/* The actual copy of pointers on the ring to obj_table.
 * Placed here since identical code is needed in both
 * the single and multi consumer dequeue functions. */
#define DEQUEUE_PTRS() do { \
    uint32_t idx = cons_head & mask; \
    const uint32_t size = r->cons.size; \
    if (likely(idx + n < size)) { \
        for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
            obj_table[i] = r->ring[idx]; \
            obj_table[i + 1] = r->ring[idx + 1]; \
            obj_table[i + 2] = r->ring[idx + 2]; \
            obj_table[i + 3] = r->ring[idx + 3]; \
        } \
        switch (n & 0x3) { \
        case 3: obj_table[i++] = r->ring[idx++]; \
        case 2: obj_table[i++] = r->ring[idx++]; \
        case 1: obj_table[i++] = r->ring[idx++]; \
        } \
    } else { \
        for (i = 0; idx < size; i++, idx++) \
            obj_table[i] = r->ring[idx]; \
        for (idx = 0; i < n; i++, idx++) \
            obj_table[i] = r->ring[idx]; \
    } \
} while (0)

/*
 * Enqueue several objects on the ring (multi-producer safe): internal helper
 * shared by the fixed-count and variable-count ("burst") variants.
 */
static inline int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
             unsigned n, enum rte_ring_queue_behavior behavior)
{
    uint32_t prod_head, prod_next;
    uint32_t cons_tail, free_entries;
    const unsigned max = n;
    int success;
    unsigned i, rep = 0;
    uint32_t mask = r->prod.mask;
    int ret;

    /* Avoid the unnecessary cmpset operation below, which is also
     * potentially harmful when n equals 0. */
    if (n == 0)
        return 0;

    /* move prod.head atomically */
    do {
        /* Reset n to the initial burst count */
        n = max;

        prod_head = r->prod.head;
        cons_tail = r->cons.tail;
        /* The subtraction is done between two unsigned 32-bit values
         * (the result is always modulo 32 bits even if we have
         * prod_head > cons_tail). So 'free_entries' is always between 0
         * and size(ring)-1. */
        free_entries = (mask + cons_tail - prod_head);

        /* check that we have enough room in ring */
        if (unlikely(n > free_entries)) {
            if (behavior == RTE_RING_QUEUE_FIXED) {
                __RING_STAT_ADD(r, enq_fail, n);
                return -ENOBUFS;
            } else {
                /* No free entry available */
                if (unlikely(free_entries == 0)) {
                    __RING_STAT_ADD(r, enq_fail, n);
                    return 0;
                }

                n = free_entries;
            }
        }

        prod_next = prod_head + n;
        success = rte_atomic32_cmpset(&r->prod.head, prod_head,
                          prod_next);
    } while (unlikely(success == 0));

    /* write entries in ring */
    ENQUEUE_PTRS();
    rte_smp_wmb();

    /* if we exceed the watermark */
    if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
        ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
                (int)(n | RTE_RING_QUOT_EXCEED);
        __RING_STAT_ADD(r, enq_quota, n);
    } else {
        ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
        __RING_STAT_ADD(r, enq_success, n);
    }

    /*
     * If there are other enqueues in progress that preceded us,
     * we need to wait for them to complete.
     */
    while (unlikely(r->prod.tail != prod_head)) {
        rte_pause();

        /* Set RTE_RING_PAUSE_REP_COUNT to avoid spinning too long while
         * waiting for other threads to finish. It gives a pre-empted
         * thread a chance to proceed and finish its ring enqueue
         * operation. */
        if (RTE_RING_PAUSE_REP_COUNT &&
            ++rep == RTE_RING_PAUSE_REP_COUNT) {
            rep = 0;
            sched_yield();
        }
    }
    r->prod.tail = prod_next;
    return ret;
}

/*
 * Enqueue several objects on the ring (NOT multi-producer safe): internal
 * helper shared by the fixed-count and variable-count ("burst") variants.
 */
static inline int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
             unsigned n, enum rte_ring_queue_behavior behavior)
{
    uint32_t prod_head, cons_tail;
    uint32_t prod_next, free_entries;
    unsigned i;
    uint32_t mask = r->prod.mask;
    int ret;

    prod_head = r->prod.head;
    cons_tail = r->cons.tail;
    /* The subtraction is done between two unsigned 32-bit values
     * (the result is always modulo 32 bits even if we have
     * prod_head > cons_tail). So 'free_entries' is always between 0
     * and size(ring)-1. */
    free_entries = mask + cons_tail - prod_head;

    /* check that we have enough room in ring */
    if (unlikely(n > free_entries)) {
        if (behavior == RTE_RING_QUEUE_FIXED) {
            __RING_STAT_ADD(r, enq_fail, n);
            return -ENOBUFS;
        } else {
            /* No free entry available */
            if (unlikely(free_entries == 0)) {
                __RING_STAT_ADD(r, enq_fail, n);
                return 0;
            }

            n = free_entries;
        }
    }

    prod_next = prod_head + n;
    r->prod.head = prod_next;

    /* write entries in ring */
    ENQUEUE_PTRS();
    rte_smp_wmb();

    /* if we exceed the watermark */
    if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
        ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
                (int)(n | RTE_RING_QUOT_EXCEED);
        __RING_STAT_ADD(r, enq_quota, n);
    } else {
        ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
        __RING_STAT_ADD(r, enq_success, n);
    }

    r->prod.tail = prod_next;
    return ret;
}

/*
 * Dequeue several objects from the ring (multi-consumer safe): internal
 * helper shared by the fixed-count and variable-count ("burst") variants.
 */
static inline int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
             unsigned n, enum rte_ring_queue_behavior behavior)
{
    uint32_t cons_head, prod_tail;
    uint32_t cons_next, entries;
    const unsigned max = n;
    int success;
    unsigned i, rep = 0;
    uint32_t mask = r->prod.mask;

    /* Avoid the unnecessary cmpset operation below, which is also
     * potentially harmful when n equals 0. */
    if (n == 0)
        return 0;

    /* move cons.head atomically */
    do {
        /* Restore n as it may change every loop */
        n = max;

        cons_head = r->cons.head;
        prod_tail = r->prod.tail;
        /* The subtraction is done between two unsigned 32-bit values
         * (the result is always modulo 32 bits even if we have
         * cons_head > prod_tail). So 'entries' is always between 0
         * and size(ring)-1. */
        entries = (prod_tail - cons_head);

        /* Set the actual entries for dequeue */
        if (n > entries) {
            if (behavior == RTE_RING_QUEUE_FIXED) {
                __RING_STAT_ADD(r, deq_fail, n);
                return -ENOENT;
            } else {
                if (unlikely(entries == 0)) {
                    __RING_STAT_ADD(r, deq_fail, n);
                    return 0;
                }

                n = entries;
            }
        }

        cons_next = cons_head + n;
        success = rte_atomic32_cmpset(&r->cons.head, cons_head,
                          cons_next);
    } while (unlikely(success == 0));

    /* copy in table */
    DEQUEUE_PTRS();
    rte_smp_rmb();

    /*
     * If there are other dequeues in progress that preceded us,
     * we need to wait for them to complete.
     */
    while (unlikely(r->cons.tail != cons_head)) {
        rte_pause();

        /* Set RTE_RING_PAUSE_REP_COUNT to avoid spinning too long while
         * waiting for other threads to finish. It gives a pre-empted
         * thread a chance to proceed and finish its ring dequeue
         * operation. */
        if (RTE_RING_PAUSE_REP_COUNT &&
            ++rep == RTE_RING_PAUSE_REP_COUNT) {
            rep = 0;
            sched_yield();
        }
    }
    __RING_STAT_ADD(r, deq_success, n);
    r->cons.tail = cons_next;

    return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}

/*
 * Dequeue several objects from the ring (NOT multi-consumer safe): internal
 * helper shared by the fixed-count and variable-count ("burst") variants.
 */
static inline int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
             unsigned n, enum rte_ring_queue_behavior behavior)
{
    uint32_t cons_head, prod_tail;
    uint32_t cons_next, entries;
    unsigned i;
    uint32_t mask = r->prod.mask;

    cons_head = r->cons.head;
    prod_tail = r->prod.tail;
    /* The subtraction is done between two unsigned 32-bit values
     * (the result is always modulo 32 bits even if we have
     * cons_head > prod_tail). So 'entries' is always between 0
     * and size(ring)-1. */
    entries = prod_tail - cons_head;

    if (n > entries) {
        if (behavior == RTE_RING_QUEUE_FIXED) {
            __RING_STAT_ADD(r, deq_fail, n);
            return -ENOENT;
        } else {
            if (unlikely(entries == 0)) {
                __RING_STAT_ADD(r, deq_fail, n);
                return 0;
            }

            n = entries;
        }
    }

    cons_next = cons_head + n;
    r->cons.head = cons_next;

    /* copy in table */
    DEQUEUE_PTRS();
    rte_smp_rmb();

    __RING_STAT_ADD(r, deq_success, n);
    r->cons.tail = cons_next;
    return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}

/* Enqueue several objects on the ring (multi-producer safe).
 * Returns 0 on success, -EDQUOT if the watermark is exceeded (objects are
 * still enqueued) or -ENOBUFS if there is not enough room (nothing enqueued). */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
             unsigned n)
{
    return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/* Enqueue several objects on the ring (NOT multi-producer safe).
 * Same return values as rte_ring_mp_enqueue_bulk(). */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
             unsigned n)
{
    return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/* Enqueue several objects on the ring, picking the single- or multi-producer
 * path according to the flags the ring was created with. */
static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
              unsigned n)
{
    if (r->prod.sp_enqueue)
        return rte_ring_sp_enqueue_bulk(r, obj_table, n);
    else
        return rte_ring_mp_enqueue_bulk(r, obj_table, n);
}

/* Enqueue one object on the ring (multi-producer safe). */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
    return rte_ring_mp_enqueue_bulk(r, &obj, 1);
}

/* Enqueue one object on the ring (NOT multi-producer safe). */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
    return rte_ring_sp_enqueue_bulk(r, &obj, 1);
}

/* Enqueue one object on the ring, picking the single- or multi-producer path
 * according to the ring's creation flags. */
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
    if (r->prod.sp_enqueue)
        return rte_ring_sp_enqueue(r, obj);
    else
        return rte_ring_mp_enqueue(r, obj);
}
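
/*
 * A minimal single-object enqueue sketch (illustrative; example_enqueue_one()
 * is an assumed name, not a DPDK symbol). rte_ring_enqueue() returns 0 on
 * success, -EDQUOT if the object was enqueued but the high watermark is now
 * exceeded, and -ENOBUFS if the ring is full and nothing was enqueued.
 */
static inline int
example_enqueue_one(struct rte_ring *r, void *obj)
{
    int ret = rte_ring_enqueue(r, obj);

    if (ret == -ENOBUFS)
        return -1;  /* ring full, object not enqueued */
    if (ret == -EDQUOT)
        return 1;   /* enqueued, but watermark exceeded */
    return 0;       /* enqueued normally */
}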

/* Dequeue several objects from the ring (multi-consumer safe).
 * Returns 0 on success or -ENOENT if there are not enough entries in the
 * ring (nothing is dequeued in that case). */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
    return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/* Dequeue several objects from the ring (NOT multi-consumer safe).
 * Same return values as rte_ring_mc_dequeue_bulk(). */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
    return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

/* Dequeue several objects from the ring, picking the single- or
 * multi-consumer path according to the ring's creation flags. */
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
    if (r->cons.sc_dequeue)
        return rte_ring_sc_dequeue_bulk(r, obj_table, n);
    else
        return rte_ring_mc_dequeue_bulk(r, obj_table, n);
}

/* Dequeue one object from the ring (multi-consumer safe). */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
    return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
}

/* Dequeue one object from the ring (NOT multi-consumer safe). */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
    return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
}

/* Dequeue one object from the ring, picking the single- or multi-consumer
 * path according to the ring's creation flags. */
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
    if (r->cons.sc_dequeue)
        return rte_ring_sc_dequeue(r, obj_p);
    else
        return rte_ring_mc_dequeue(r, obj_p);
}
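
/*
 * A minimal single-object dequeue sketch (illustrative; example_dequeue_one()
 * is an assumed name). rte_ring_dequeue() returns 0 on success and -ENOENT
 * when the ring is empty, in which case the output pointer is left untouched.
 */
static inline void *
example_dequeue_one(struct rte_ring *r)
{
    void *obj = NULL;

    if (rte_ring_dequeue(r, &obj) != 0)
        return NULL; /* -ENOENT: nothing to dequeue */
    return obj;
}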

/* Test if a ring is full: returns 1 if so, 0 otherwise. */
static inline int
rte_ring_full(const struct rte_ring *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
}

/* Test if a ring is empty: returns 1 if so, 0 otherwise. */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return !!(cons_tail == prod_tail);
}

/* Return the number of entries currently in the ring. */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return (prod_tail - cons_tail) & r->prod.mask;
}

/* Return the number of free entries in the ring. */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
    uint32_t prod_tail = r->prod.tail;
    uint32_t cons_tail = r->cons.tail;
    return (cons_tail - prod_tail - 1) & r->prod.mask;
}
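
/*
 * A small sanity-check sketch (illustrative; example_ring_capacity() is an
 * assumed name): one slot is always kept unused to distinguish a full ring
 * from an empty one, so used and free entries add up to size - 1, e.g. a
 * ring created with count == 1024 stores at most 1023 objects.
 */
static inline unsigned
example_ring_capacity(const struct rte_ring *r)
{
    /* equals r->prod.size - 1 for any snapshot taken while the ring is idle */
    return rte_ring_count(r) + rte_ring_free_count(r);
}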

/* Dump the status of all configured rings to a file. */
void rte_ring_list_dump(FILE *f);

/* Search for a ring by its name; returns NULL if no such ring exists. */
struct rte_ring *rte_ring_lookup(const char *name);

/* Enqueue up to n objects on the ring (multi-producer safe); returns the
 * number actually enqueued, with the RTE_RING_QUOT_EXCEED bit set in the
 * return value if the watermark was exceeded. */
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
              unsigned n)
{
    return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/* Enqueue up to n objects on the ring (NOT multi-producer safe); same return
 * convention as rte_ring_mp_enqueue_burst(). */
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
              unsigned n)
{
    return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/* Enqueue up to n objects on the ring, picking the single- or multi-producer
 * path according to the ring's creation flags. */
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
               unsigned n)
{
    if (r->prod.sp_enqueue)
        return rte_ring_sp_enqueue_burst(r, obj_table, n);
    else
        return rte_ring_mp_enqueue_burst(r, obj_table, n);
}

/* Dequeue up to n objects from the ring (multi-consumer safe); returns the
 * number of objects actually dequeued, 0 if the ring is empty. */
static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
    return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/* Dequeue up to n objects from the ring (NOT multi-consumer safe); returns
 * the number of objects actually dequeued, 0 if the ring is empty. */
static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
    return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

/* Dequeue up to n objects from the ring, picking the single- or
 * multi-consumer path according to the ring's creation flags. */
static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
    if (r->cons.sc_dequeue)
        return rte_ring_sc_dequeue_burst(r, obj_table, n);
    else
        return rte_ring_mc_dequeue_burst(r, obj_table, n);
}
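
/*
 * A minimal burst-dequeue sketch (illustrative; example_ring_drain() and the
 * burst size of 32 are assumptions). rte_ring_dequeue_burst() returns the
 * number of objects actually dequeued, anywhere from 0 to n.
 */
static inline unsigned
example_ring_drain(struct rte_ring *r)
{
    void *objs[32]; /* 32 is an arbitrary, illustrative burst size */
    unsigned nb, total = 0;

    /* Keep pulling bursts until the ring is observed empty. */
    while ((nb = rte_ring_dequeue_burst(r, objs, 32)) > 0) {
        /* ... process objs[0] .. objs[nb - 1] here ... */
        total += nb;
    }
    return total;
}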

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_H_ */