DPDK 2.1.0
rte_ring.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Derived from FreeBSD's bufring.h
 *
 **************************************************************************
 *
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. The name of Kip Macy nor the names of other
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ***************************************************************************/

#ifndef _RTE_RING_H_
#define _RTE_RING_H_

/**
 * @file
 * RTE Ring
 *
 * The Ring Manager is a fixed-size queue, implemented as a table of
 * pointers. Head and tail indexes are modified atomically, allowing
 * concurrent access to it. It has the following features:
 *
 * - FIFO (First In First Out)
 * - Maximum size is fixed; the pointers are stored in a table.
 * - Lockless implementation.
 * - Multi- or single-consumer dequeue.
 * - Multi- or single-producer enqueue.
 * - Bulk dequeue and bulk enqueue.
 *
 * Note: the ring implementation is not preemptable; an lcore must not be
 * preempted by another task that uses the same ring.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdint.h>
#include <sys/queue.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>

#define RTE_TAILQ_RING_NAME "RTE_RING"

enum rte_ring_queue_behavior {
        RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
        RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};

#ifdef RTE_LIBRTE_RING_DEBUG

/** A structure that stores the ring statistics (per-lcore). */
struct rte_ring_debug_stats {
        uint64_t enq_success_bulk; /**< Successful enqueues number. */
        uint64_t enq_success_objs; /**< Objects successfully enqueued. */
        uint64_t enq_quota_bulk;   /**< Successful enqueues above watermark. */
        uint64_t enq_quota_objs;   /**< Objects enqueued above watermark. */
        uint64_t enq_fail_bulk;    /**< Failed enqueues number. */
        uint64_t enq_fail_objs;    /**< Objects which failed to be enqueued. */
        uint64_t deq_success_bulk; /**< Successful dequeues number. */
        uint64_t deq_success_objs; /**< Objects successfully dequeued. */
        uint64_t deq_fail_bulk;    /**< Failed dequeues number. */
        uint64_t deq_fail_objs;    /**< Objects which failed to be dequeued. */
} __rte_cache_aligned;
#endif

#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
#define RTE_RING_MZ_PREFIX "RG_"

#ifndef RTE_RING_PAUSE_REP_COUNT
#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times,
                                    *   no yield if RTE_RING_PAUSE_REP_COUNT = 0. */
#endif

/**
 * An RTE ring structure.
 *
 * The producer and the consumer each have a head and a tail index. These
 * indexes are not kept between 0 and size(ring)-1: they are free-running
 * 32-bit counters, masked only when the ring[] table is accessed. Thanks to
 * this, index subtraction works in modulo-32-bit arithmetic and counter
 * overflow is not a problem.
 */
struct rte_ring {
        char name[RTE_RING_NAMESIZE];    /**< Name of the ring. */
        int flags;                       /**< Flags supplied at creation. */

        /** Ring producer status. */
        struct prod {
                uint32_t watermark;      /**< Maximum items before EDQUOT. */
                uint32_t sp_enqueue;     /**< True, if single producer. */
                uint32_t size;           /**< Size of ring. */
                uint32_t mask;           /**< Mask (size-1) of ring. */
                volatile uint32_t head;  /**< Producer head. */
                volatile uint32_t tail;  /**< Producer tail. */
        } prod __rte_cache_aligned;

        /** Ring consumer status. */
        struct cons {
                uint32_t sc_dequeue;     /**< True, if single consumer. */
                uint32_t size;           /**< Size of the ring. */
                uint32_t mask;           /**< Mask (size-1) of ring. */
                volatile uint32_t head;  /**< Consumer head. */
                volatile uint32_t tail;  /**< Consumer tail. */
#ifdef RTE_RING_SPLIT_PROD_CONS
        } cons __rte_cache_aligned;
#else
        } cons;
#endif

#ifdef RTE_LIBRTE_RING_DEBUG
        struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
#endif

        /** Memory space of ring starts here.
         *  Not volatile, so be careful about compiler re-ordering. */
        void *ring[0] __rte_cache_aligned;
};

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_QUOT_EXCEED (1 << 31)  /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
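
/*
 * Note: the ring size must be a power of two, and one slot is always kept
 * empty so that a full ring can be distinguished from an empty one. A ring
 * created with count = 1024 therefore holds at most 1023 object pointers,
 * and indexes are reduced with "& mask" where mask = size - 1:
 *
 *     size = 1024, mask = 1023
 *     used = (prod.tail - cons.tail) & mask
 *     free = (cons.tail - prod.tail - 1) & mask    (used + free == 1023)
 */
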
#ifdef RTE_LIBRTE_RING_DEBUG
#define __RING_STAT_ADD(r, name, n) do {                        \
                unsigned __lcore_id = rte_lcore_id();           \
                if (__lcore_id < RTE_MAX_LCORE) {               \
                        r->stats[__lcore_id].name##_objs += n;  \
                        r->stats[__lcore_id].name##_bulk += 1;  \
                }                                               \
        } while(0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while(0)
#endif

ssize_t rte_ring_get_memsize(unsigned count);

int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
        unsigned flags);

struct rte_ring *rte_ring_create(const char *name, unsigned count,
                                 int socket_id, unsigned flags);

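/*
 * Illustrative usage sketch (the ring name "example_ring" and the error
 * handling are placeholders, not part of this API's documentation). A ring
 * is typically created once after EAL initialization and then retrieved by
 * name elsewhere in the application:
 *
 *     struct rte_ring *r = rte_ring_create("example_ring", 1024,
 *                                          rte_socket_id(),
 *                                          RING_F_SP_ENQ | RING_F_SC_DEQ);
 *     if (r == NULL)
 *         ... creation failed, rte_errno holds the reason ...
 *
 *     struct rte_ring *same = rte_ring_lookup("example_ring");
 *     ... 'same' points to the ring created above, or is NULL ...
 */
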
int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);

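/*
 * Watermark usage sketch (the values are placeholders). With a watermark in
 * place, enqueues still succeed once the ring holds more than 'count'
 * objects, but the condition is reported: fixed-size enqueues return
 * -EDQUOT and burst enqueues set the RTE_RING_QUOT_EXCEED bit in their
 * return value.
 *
 *     rte_ring_set_water_mark(r, 800);       on a ring of size 1024
 *     int ret = rte_ring_enqueue(r, obj);
 *     if (ret == -EDQUOT)
 *         ... enqueued, but more than 800 entries are now in use ...
 *     else if (ret == -ENOBUFS)
 *         ... ring was full, nothing was enqueued ...
 */
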
void rte_ring_dump(FILE *f, const struct rte_ring *r);

/* the actual enqueue of pointers on the ring.
 * Placed here since identical code needed in both
 * single and multi producer enqueue functions */
#define ENQUEUE_PTRS() do { \
        const uint32_t size = r->prod.size; \
        uint32_t idx = prod_head & mask; \
        if (likely(idx + n < size)) { \
                for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
                        r->ring[idx] = obj_table[i]; \
                        r->ring[idx+1] = obj_table[i+1]; \
                        r->ring[idx+2] = obj_table[i+2]; \
                        r->ring[idx+3] = obj_table[i+3]; \
                } \
                switch (n & 0x3) { \
                        case 3: r->ring[idx++] = obj_table[i++]; \
                        case 2: r->ring[idx++] = obj_table[i++]; \
                        case 1: r->ring[idx++] = obj_table[i++]; \
                } \
        } else { \
                for (i = 0; idx < size; i++, idx++)\
                        r->ring[idx] = obj_table[i]; \
                for (idx = 0; i < n; i++, idx++) \
                        r->ring[idx] = obj_table[i]; \
        } \
} while(0)
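
/*
 * Note: the fast path above copies four pointers per iteration and falls
 * back to the two scalar loops only when the write would cross the end of
 * the table. For example, with size = 16, idx = prod_head & mask = 14 and
 * n = 5, idx + n = 19 >= size, so ring[14..15] is written from
 * obj_table[0..1] and the copy then wraps to write ring[0..2] from
 * obj_table[2..4]. DEQUEUE_PTRS() below is the mirror image for reads.
 */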

/* the actual copy of pointers on the ring to obj_table.
 * Placed here since identical code needed in both
 * single and multi consumer dequeue functions */
#define DEQUEUE_PTRS() do { \
        uint32_t idx = cons_head & mask; \
        const uint32_t size = r->cons.size; \
        if (likely(idx + n < size)) { \
                for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
                        obj_table[i] = r->ring[idx]; \
                        obj_table[i+1] = r->ring[idx+1]; \
                        obj_table[i+2] = r->ring[idx+2]; \
                        obj_table[i+3] = r->ring[idx+3]; \
                } \
                switch (n & 0x3) { \
                        case 3: obj_table[i++] = r->ring[idx++]; \
                        case 2: obj_table[i++] = r->ring[idx++]; \
                        case 1: obj_table[i++] = r->ring[idx++]; \
                } \
        } else { \
                for (i = 0; idx < size; i++, idx++) \
                        obj_table[i] = r->ring[idx]; \
                for (idx = 0; i < n; i++, idx++) \
                        obj_table[i] = r->ring[idx]; \
        } \
} while (0)

static inline int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t prod_head, prod_next;
        uint32_t cons_tail, free_entries;
        const unsigned max = n;
        int success;
        unsigned i, rep = 0;
        uint32_t mask = r->prod.mask;
        int ret;

        /* move prod.head atomically */
        do {
                /* Reset n to the initial burst count */
                n = max;

                prod_head = r->prod.head;
                cons_tail = r->cons.tail;
                /* The subtraction is done between two unsigned 32bits value
                 * (the result is always modulo 32 bits even if we have
                 * prod_head > cons_tail). So 'free_entries' is always between 0
                 * and size(ring)-1. */
                free_entries = (mask + cons_tail - prod_head);

                /* check that we have enough room in ring */
                if (unlikely(n > free_entries)) {
                        if (behavior == RTE_RING_QUEUE_FIXED) {
                                __RING_STAT_ADD(r, enq_fail, n);
                                return -ENOBUFS;
                        }
                        else {
                                /* No free entry available */
                                if (unlikely(free_entries == 0)) {
                                        __RING_STAT_ADD(r, enq_fail, n);
                                        return 0;
                                }

                                n = free_entries;
                        }
                }

                prod_next = prod_head + n;
                success = rte_atomic32_cmpset(&r->prod.head, prod_head,
                                              prod_next);
        } while (unlikely(success == 0));

        /* write entries in ring */
        ENQUEUE_PTRS();
        rte_compiler_barrier();

        /* if we exceed the watermark */
        if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
                                (int)(n | RTE_RING_QUOT_EXCEED);
                __RING_STAT_ADD(r, enq_quota, n);
        }
        else {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
                __RING_STAT_ADD(r, enq_success, n);
        }

        /*
         * If there are other enqueues in progress that preceded us,
         * we need to wait for them to complete
         */
        while (unlikely(r->prod.tail != prod_head)) {
                rte_pause();

                /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
                 * for other thread finish. It gives pre-empted thread a chance
                 * to proceed and finish with ring dequeue operation. */
                if (RTE_RING_PAUSE_REP_COUNT &&
                    ++rep == RTE_RING_PAUSE_REP_COUNT) {
                        rep = 0;
                        sched_yield();
                }
        }
        r->prod.tail = prod_next;
        return ret;
}
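
/*
 * Note on the free-space computation above: head and tail are free-running
 * 32-bit counters, so "mask + cons_tail - prod_head" stays correct even
 * after they wrap. For example, with mask = 1023, cons_tail = 0xFFFFFFFE
 * and prod_head = 5 (7 entries in use), the expression evaluates modulo
 * 2^32 to 1023 + 0xFFFFFFFE - 5 = 1016, which is exactly mask - 7 free
 * entries.
 */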

static inline int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t prod_head, cons_tail;
        uint32_t prod_next, free_entries;
        unsigned i;
        uint32_t mask = r->prod.mask;
        int ret;

        prod_head = r->prod.head;
        cons_tail = r->cons.tail;
        /* The subtraction is done between two unsigned 32bits value
         * (the result is always modulo 32 bits even if we have
         * prod_head > cons_tail). So 'free_entries' is always between 0
         * and size(ring)-1. */
        free_entries = mask + cons_tail - prod_head;

        /* check that we have enough room in ring */
        if (unlikely(n > free_entries)) {
                if (behavior == RTE_RING_QUEUE_FIXED) {
                        __RING_STAT_ADD(r, enq_fail, n);
                        return -ENOBUFS;
                }
                else {
                        /* No free entry available */
                        if (unlikely(free_entries == 0)) {
                                __RING_STAT_ADD(r, enq_fail, n);
                                return 0;
                        }

                        n = free_entries;
                }
        }

        prod_next = prod_head + n;
        r->prod.head = prod_next;

        /* write entries in ring */
        ENQUEUE_PTRS();
        rte_compiler_barrier();

        /* if we exceed the watermark */
        if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
                        (int)(n | RTE_RING_QUOT_EXCEED);
                __RING_STAT_ADD(r, enq_quota, n);
        }
        else {
                ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
                __RING_STAT_ADD(r, enq_success, n);
        }

        r->prod.tail = prod_next;
        return ret;
}

static inline int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t cons_head, prod_tail;
        uint32_t cons_next, entries;
        const unsigned max = n;
        int success;
        unsigned i, rep = 0;
        uint32_t mask = r->prod.mask;

        /* move cons.head atomically */
        do {
                /* Restore n as it may change every loop */
                n = max;

                cons_head = r->cons.head;
                prod_tail = r->prod.tail;
                /* The subtraction is done between two unsigned 32bits value
                 * (the result is always modulo 32 bits even if we have
                 * cons_head > prod_tail). So 'entries' is always between 0
                 * and size(ring)-1. */
                entries = (prod_tail - cons_head);

                /* Set the actual entries for dequeue */
                if (n > entries) {
                        if (behavior == RTE_RING_QUEUE_FIXED) {
                                __RING_STAT_ADD(r, deq_fail, n);
                                return -ENOENT;
                        }
                        else {
                                if (unlikely(entries == 0)){
                                        __RING_STAT_ADD(r, deq_fail, n);
                                        return 0;
                                }

                                n = entries;
                        }
                }

                cons_next = cons_head + n;
                success = rte_atomic32_cmpset(&r->cons.head, cons_head,
                                              cons_next);
        } while (unlikely(success == 0));

        /* copy in table */
        DEQUEUE_PTRS();
        rte_compiler_barrier();

        /*
         * If there are other dequeues in progress that preceded us,
         * we need to wait for them to complete
         */
        while (unlikely(r->cons.tail != cons_head)) {
                rte_pause();

                /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
                 * for other thread finish. It gives pre-empted thread a chance
                 * to proceed and finish with ring dequeue operation. */
                if (RTE_RING_PAUSE_REP_COUNT &&
                    ++rep == RTE_RING_PAUSE_REP_COUNT) {
                        rep = 0;
                        sched_yield();
                }
        }
        __RING_STAT_ADD(r, deq_success, n);
        r->cons.tail = cons_next;

        return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
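
/*
 * Note on the final wait loop: if lcore A reserves a block of slots and
 * lcore B then reserves the following block, B may finish its copy first.
 * B must spin until cons.tail has advanced to its own cons_head (i.e. until
 * A has published its block) before moving the tail, so producers never see
 * a consumer tail that covers entries still being read. The multi-producer
 * enqueue above applies the same scheme to prod.tail.
 */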

static inline int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
                         unsigned n, enum rte_ring_queue_behavior behavior)
{
        uint32_t cons_head, prod_tail;
        uint32_t cons_next, entries;
        unsigned i;
        uint32_t mask = r->prod.mask;

        cons_head = r->cons.head;
        prod_tail = r->prod.tail;
        /* The subtraction is done between two unsigned 32bits value
         * (the result is always modulo 32 bits even if we have
         * cons_head > prod_tail). So 'entries' is always between 0
         * and size(ring)-1. */
        entries = prod_tail - cons_head;

        if (n > entries) {
                if (behavior == RTE_RING_QUEUE_FIXED) {
                        __RING_STAT_ADD(r, deq_fail, n);
                        return -ENOENT;
                }
                else {
                        if (unlikely(entries == 0)){
                                __RING_STAT_ADD(r, deq_fail, n);
                                return 0;
                        }

                        n = entries;
                }
        }

        cons_next = cons_head + n;
        r->cons.head = cons_next;

        /* copy in table */
        DEQUEUE_PTRS();
        rte_compiler_barrier();

        __RING_STAT_ADD(r, deq_success, n);
        r->cons.tail = cons_next;
        return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}

static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
                         unsigned n)
{
        return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
                         unsigned n)
{
        return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
                      unsigned n)
{
        if (r->prod.sp_enqueue)
                return rte_ring_sp_enqueue_bulk(r, obj_table, n);
        else
                return rte_ring_mp_enqueue_bulk(r, obj_table, n);
}
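
/*
 * Bulk enqueue usage sketch ('bufs' and BURST are placeholders). Fixed-size
 * (bulk) operations are all-or-nothing: either every pointer is enqueued or
 * none is.
 *
 *     #define BURST 32
 *     void *bufs[BURST];
 *     ... fill bufs[0..BURST-1] ...
 *     int ret = rte_ring_enqueue_bulk(r, bufs, BURST);
 *     if (ret == -ENOBUFS)
 *         ... not enough room, nothing was enqueued ...
 *     else if (ret == -EDQUOT)
 *         ... enqueued, but the watermark is now exceeded ...
 */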

static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
        return rte_ring_mp_enqueue_bulk(r, &obj, 1);
}

static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
        return rte_ring_sp_enqueue_bulk(r, &obj, 1);
}

static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
        if (r->prod.sp_enqueue)
                return rte_ring_sp_enqueue(r, obj);
        else
                return rte_ring_mp_enqueue(r, obj);
}

static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}

static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
        if (r->cons.sc_dequeue)
                return rte_ring_sc_dequeue_bulk(r, obj_table, n);
        else
                return rte_ring_mc_dequeue_bulk(r, obj_table, n);
}
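
/*
 * Bulk dequeue usage sketch ('bufs' is a placeholder). Like the enqueue
 * side, the fixed-size dequeue delivers exactly 'n' objects or none:
 *
 *     void *bufs[32];
 *     if (rte_ring_dequeue_bulk(r, bufs, 32) == 0)
 *         ... bufs[0..31] now hold 32 object pointers ...
 *     else
 *         ... fewer than 32 objects were available, nothing was dequeued ...
 */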

static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
        return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
}

static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
        return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
}

static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
        if (r->cons.sc_dequeue)
                return rte_ring_sc_dequeue(r, obj_p);
        else
                return rte_ring_mc_dequeue(r, obj_p);
}

static inline int
rte_ring_full(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
}

static inline int
rte_ring_empty(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return !!(cons_tail == prod_tail);
}

static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return ((prod_tail - cons_tail) & r->prod.mask);
}

static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
        return ((cons_tail - prod_tail - 1) & r->prod.mask);
}
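
/*
 * Note: rte_ring_count() and rte_ring_free_count() read only the tail
 * indexes, so their results are approximate while other lcores are actively
 * enqueuing or dequeuing. On a quiescent ring created with count = 1024,
 * rte_ring_count(r) + rte_ring_free_count(r) == 1023.
 */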

void rte_ring_list_dump(FILE *f);

struct rte_ring *rte_ring_lookup(const char *name);

static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
                          unsigned n)
{
        return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
                          unsigned n)
{
        return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
                       unsigned n)
{
        if (r->prod.sp_enqueue)
                return rte_ring_sp_enqueue_burst(r, obj_table, n);
        else
                return rte_ring_mp_enqueue_burst(r, obj_table, n);
}

static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
        return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}

static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
        if (r->cons.sc_dequeue)
                return rte_ring_sc_dequeue_burst(r, obj_table, n);
        else
                return rte_ring_mc_dequeue_burst(r, obj_table, n);
}
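
/*
 * Burst usage sketch ('bufs' and 'process' are placeholders). Burst variants
 * return how many objects were actually transferred (possibly 0), which
 * suits a polling loop:
 *
 *     void *bufs[32];
 *     unsigned i, nb = rte_ring_dequeue_burst(r, bufs, 32);
 *     for (i = 0; i < nb; i++)
 *         process(bufs[i]);
 *
 * rte_ring_enqueue_burst() may likewise accept only part of a burst; the
 * caller keeps ownership of the objects that were not enqueued, and if a
 * watermark is set the RTE_RING_QUOT_EXCEED bit may be set in its return
 * value.
 */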

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_H_ */