#include <stdint.h>
#include <sched.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>

#define RTE_TAILQ_RING_NAME "RTE_RING"
enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
	RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};
#ifdef RTE_LIBRTE_RING_DEBUG
/**
 * A structure that stores the ring statistics (per-lcore).
 */
struct rte_ring_debug_stats {
	uint64_t enq_success_bulk; /**< Successful enqueues number. */
	uint64_t enq_success_objs; /**< Objects successfully enqueued. */
	uint64_t enq_quota_bulk;   /**< Successful enqueues above watermark. */
	uint64_t enq_quota_objs;   /**< Objects enqueued above watermark. */
	uint64_t enq_fail_bulk;    /**< Failed enqueues number. */
	uint64_t enq_fail_objs;    /**< Objects that failed to be enqueued. */
	uint64_t deq_success_bulk; /**< Successful dequeues number. */
	uint64_t deq_success_objs; /**< Objects successfully dequeued. */
	uint64_t deq_fail_bulk;    /**< Failed dequeues number. */
	uint64_t deq_fail_objs;    /**< Objects that failed to be dequeued. */
} __rte_cache_aligned;
#endif
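/*
 * Usage sketch (illustrative, not part of the original header): with
 * RTE_LIBRTE_RING_DEBUG enabled the counters are kept per lcore, so a
 * whole-ring figure is the sum over all RTE_MAX_LCORE slots. The helper
 * name below is hypothetical:
 *
 *	static uint64_t total_enq_objs(const struct rte_ring *r)
 *	{
 *		uint64_t sum = 0;
 *		unsigned i;
 *
 *		for (i = 0; i < RTE_MAX_LCORE; i++)
 *			sum += r->stats[i].enq_success_objs;
 *		return sum;
 *	}
 */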
#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
#define RTE_RING_MZ_PREFIX "RG_"

#ifndef RTE_RING_PAUSE_REP_COUNT
#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
                                    *   if RTE_RING_PAUSE_REP not defined. */
#endif
struct rte_ring {
	char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */
	int flags;                    /**< Flags supplied at creation. */

	/** Ring producer status. */
	struct prod {
		uint32_t watermark;      /**< Maximum items before EDQUOT. */
		uint32_t sp_enqueue;     /**< True, if single producer. */
		uint32_t size;           /**< Size of ring. */
		uint32_t mask;           /**< Mask (size-1) of ring. */
		volatile uint32_t head;  /**< Producer head. */
		volatile uint32_t tail;  /**< Producer tail. */
	} prod __rte_cache_aligned;

	/** Ring consumer status. */
	struct cons {
		uint32_t sc_dequeue;     /**< True, if single consumer. */
		uint32_t size;           /**< Size of the ring. */
		uint32_t mask;           /**< Mask (size-1) of ring. */
		volatile uint32_t head;  /**< Consumer head. */
		volatile uint32_t tail;  /**< Consumer tail. */
#ifdef RTE_RING_SPLIT_PROD_CONS
	} cons __rte_cache_aligned;
#else
	} cons;
#endif

#ifdef RTE_LIBRTE_RING_DEBUG
	struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
#endif

	void *ring[0] __rte_cache_aligned; /**< Memory space of ring starts here.
	                                    * not volatile so need to be careful
	                                    * about compiler re-ordering */
};
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RTE_RING_QUOT_EXCEED (1 << 31)          /**< Quota exceeded for burst ops. */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask. */
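/*
 * Decoding sketch (illustrative, not part of the original header): a burst
 * enqueue on a ring with a watermark returns n | RTE_RING_QUOT_EXCEED once
 * the watermark is passed, so the count and the flag can be separated.
 * Masking with RTE_RING_SZ_MASK to recover the count is an assumption of
 * this example:
 *
 *	unsigned ret = rte_ring_enqueue_burst(r, table, n);
 *	unsigned done = ret & RTE_RING_SZ_MASK;
 *	int above_watermark = (ret & RTE_RING_QUOT_EXCEED) != 0;
 */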
#ifdef RTE_LIBRTE_RING_DEBUG
#define __RING_STAT_ADD(r, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			r->stats[__lcore_id].name##_objs += n;	\
			r->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while (0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while (0)
#endif
/** Create a new ring named *name* in memory; count must be a power of two. */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags);
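/*
 * Usage sketch (illustrative, not part of the original header): create a
 * ring sized to a power of two; RING_F_SP_ENQ / RING_F_SC_DEQ select the
 * single-producer / single-consumer fast paths at creation time. The name
 * and size are arbitrary, and SOCKET_ID_ANY (from rte_memory.h) is assumed
 * to be available here.
 */
static inline struct rte_ring *
example_ring_create_spsc(void)
{
	/* count must be a power of 2; usable capacity is count - 1 */
	return rte_ring_create("example_ring", 1024, SOCKET_ID_ANY,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}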
/* The actual enqueue of pointers on the ring, used by both single- and
 * multi-producer paths. Copies in unrolled chunks of 4 and splits the copy
 * when it wraps past the end of the ring. */
#define ENQUEUE_PTRS() do { \
	const uint32_t size = r->prod.size; \
	uint32_t idx = prod_head & mask; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
			r->ring[idx] = obj_table[i]; \
			r->ring[idx+1] = obj_table[i+1]; \
			r->ring[idx+2] = obj_table[i+2]; \
			r->ring[idx+3] = obj_table[i+3]; \
		} \
		switch (n & 0x3) { \
		case 3: r->ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 2: r->ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 1: r->ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			r->ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			r->ring[idx] = obj_table[i]; \
	} \
} while (0)
/* The actual copy of pointers out of the ring, used by both single- and
 * multi-consumer paths; same unrolling and wrap-around handling as
 * ENQUEUE_PTRS(). */
#define DEQUEUE_PTRS() do { \
	uint32_t idx = cons_head & mask; \
	const uint32_t size = r->cons.size; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
			obj_table[i] = r->ring[idx]; \
			obj_table[i+1] = r->ring[idx+1]; \
			obj_table[i+2] = r->ring[idx+2]; \
			obj_table[i+3] = r->ring[idx+3]; \
		} \
		switch (n & 0x3) { \
		case 3: obj_table[i++] = r->ring[idx++]; /* fallthrough */ \
		case 2: obj_table[i++] = r->ring[idx++]; /* fallthrough */ \
		case 1: obj_table[i++] = r->ring[idx++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = r->ring[idx]; \
	} \
} while (0)
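/*
 * Standalone sketch (illustrative, not part of the original header) of the
 * wrap-around path used by both macros above: when the contiguous space at
 * the end of the array is too small, the copy is split into a tail segment
 * and a head segment. All names here are local to the example.
 */
static inline void
example_copy_wrapped(void **dst_ring, uint32_t size, uint32_t start,
		     void * const *src, uint32_t n)
{
	uint32_t i, idx = start;

	for (i = 0; i < n && idx < size; i++, idx++)
		dst_ring[idx] = src[i];	/* up to the end of the array */
	for (idx = 0; i < n; i++, idx++)
		dst_ring[idx] = src[i];	/* continue from slot 0 */
}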
/**
 * Enqueue several objects on the ring (multi-producers safe), moving
 * prod.head with a compare-and-set so concurrent producers each reserve
 * a private slice of the ring.
 */
static inline int __attribute__((always_inline))
__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, prod_next;
	uint32_t cons_tail, free_entries;
	const unsigned max = n;
	int success, ret;
	unsigned i, rep = 0;
	uint32_t mask = r->prod.mask;

	/* move prod.head atomically */
	do {
		/* reset n to the initial burst count */
		n = max;

		prod_head = r->prod.head;
		cons_tail = r->cons.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 2^32 even if prod_head has
		 * wrapped past cons_tail), so free_entries is always between
		 * 0 and size(ring)-1. */
		free_entries = (mask + cons_tail - prod_head);

		/* check that we have enough room in ring */
		if (unlikely(n > free_entries)) {
			if (behavior == RTE_RING_QUEUE_FIXED) {
				__RING_STAT_ADD(r, enq_fail, n);
				return -ENOBUFS;
			}
			/* no free entry available */
			if (unlikely(free_entries == 0)) {
				__RING_STAT_ADD(r, enq_fail, n);
				return 0;
			}
			n = free_entries;
		}

		prod_next = prod_head + n;
		success = rte_atomic32_cmpset(&r->prod.head, prod_head,
					      prod_next);
	} while (unlikely(success == 0));

	/* write entries in ring */
	ENQUEUE_PTRS();
	rte_smp_wmb();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
				(int)(n | RTE_RING_QUOT_EXCEED);
		__RING_STAT_ADD(r, enq_quota, n);
	} else {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
		__RING_STAT_ADD(r, enq_success, n);
	}

	/* if there are other enqueues in progress that preceded us, wait for
	 * them to complete before publishing our slice */
	while (unlikely(r->prod.tail != prod_head)) {
		rte_pause();
		/* yield occasionally so a preempted producer gets a chance
		 * to finish (no yield if RTE_RING_PAUSE_REP_COUNT is 0) */
		if (RTE_RING_PAUSE_REP_COUNT &&
		    ++rep == RTE_RING_PAUSE_REP_COUNT) {
			rep = 0;
			sched_yield();
		}
	}
	r->prod.tail = prod_next;
	return ret;
}
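/*
 * Worked example (illustrative, not part of the original header): the
 * free-entry computation above relies on modulo-2^32 unsigned arithmetic,
 * so it stays correct when the 32-bit indices wrap. With mask = 1023,
 * cons_tail = 4294967290 and prod_head = 5 (the producer has wrapped past
 * UINT32_MAX), the ring holds 11 entries and this returns the 1012
 * remaining free slots (1023 - 11).
 */
static inline uint32_t
example_free_entries(uint32_t mask, uint32_t cons_tail, uint32_t prod_head)
{
	return mask + cons_tail - prod_head; /* wraps modulo 2^32 */
}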
/**
 * Enqueue several objects on the ring (NOT multi-producers safe): with a
 * single producer no compare-and-set is needed to move prod.head.
 */
static inline int __attribute__((always_inline))
__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t prod_head, cons_tail;
	uint32_t prod_next, free_entries;
	unsigned i;
	uint32_t mask = r->prod.mask;
	int ret;

	prod_head = r->prod.head;
	cons_tail = r->cons.tail;
	/* modulo-2^32 subtraction keeps free_entries in [0, size-1] even
	 * when the indices wrap */
	free_entries = mask + cons_tail - prod_head;

	/* check that we have enough room in ring */
	if (unlikely(n > free_entries)) {
		if (behavior == RTE_RING_QUEUE_FIXED) {
			__RING_STAT_ADD(r, enq_fail, n);
			return -ENOBUFS;
		}
		/* no free entry available */
		if (unlikely(free_entries == 0)) {
			__RING_STAT_ADD(r, enq_fail, n);
			return 0;
		}
		n = free_entries;
	}

	prod_next = prod_head + n;
	r->prod.head = prod_next;

	/* write entries in ring */
	ENQUEUE_PTRS();
	rte_smp_wmb();

	/* if we exceed the watermark */
	if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
				(int)(n | RTE_RING_QUOT_EXCEED);
		__RING_STAT_ADD(r, enq_quota, n);
	} else {
		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
		__RING_STAT_ADD(r, enq_success, n);
	}

	r->prod.tail = prod_next;
	return ret;
}
/**
 * Dequeue several objects from the ring (multi-consumers safe), moving
 * cons.head with a compare-and-set so concurrent consumers each reserve
 * a private slice of the ring.
 */
static inline int __attribute__((always_inline))
__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	const unsigned max = n;
	int success;
	unsigned i, rep = 0;
	uint32_t mask = r->prod.mask;

	/* move cons.head atomically */
	do {
		/* restore n as it may change every loop */
		n = max;

		cons_head = r->cons.head;
		prod_tail = r->prod.tail;
		/* modulo-2^32 subtraction keeps entries in [0, size-1] even
		 * when the indices wrap */
		entries = (prod_tail - cons_head);

		/* set the actual entries for dequeue */
		if (n > entries) {
			if (behavior == RTE_RING_QUEUE_FIXED) {
				__RING_STAT_ADD(r, deq_fail, n);
				return -ENOENT;
			}
			if (unlikely(entries == 0)) {
				__RING_STAT_ADD(r, deq_fail, n);
				return 0;
			}
			n = entries;
		}

		cons_next = cons_head + n;
		success = rte_atomic32_cmpset(&r->cons.head, cons_head,
					      cons_next);
	} while (unlikely(success == 0));

	/* copy entries out of the ring */
	DEQUEUE_PTRS();
	rte_smp_rmb();

	/* if there are other dequeues in progress that preceded us, wait for
	 * them to complete before publishing our slice */
	while (unlikely(r->cons.tail != cons_head)) {
		rte_pause();
		/* yield occasionally so a preempted consumer gets a chance
		 * to finish (no yield if RTE_RING_PAUSE_REP_COUNT is 0) */
		if (RTE_RING_PAUSE_REP_COUNT &&
		    ++rep == RTE_RING_PAUSE_REP_COUNT) {
			rep = 0;
			sched_yield();
		}
	}
	__RING_STAT_ADD(r, deq_success, n);
	r->cons.tail = cons_next;

	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/**
 * Dequeue several objects from the ring (NOT multi-consumers safe): with a
 * single consumer no compare-and-set is needed to move cons.head.
 */
static inline int __attribute__((always_inline))
__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
			 unsigned n, enum rte_ring_queue_behavior behavior)
{
	uint32_t cons_head, prod_tail;
	uint32_t cons_next, entries;
	unsigned i;
	uint32_t mask = r->prod.mask;

	cons_head = r->cons.head;
	prod_tail = r->prod.tail;
	/* modulo-2^32 subtraction keeps entries in [0, size-1] even when
	 * the indices wrap */
	entries = prod_tail - cons_head;

	if (n > entries) {
		if (behavior == RTE_RING_QUEUE_FIXED) {
			__RING_STAT_ADD(r, deq_fail, n);
			return -ENOENT;
		}
		if (unlikely(entries == 0)) {
			__RING_STAT_ADD(r, deq_fail, n);
			return 0;
		}
		n = entries;
	}

	cons_next = cons_head + n;
	r->cons.head = cons_next;

	/* copy entries out of the ring */
	DEQUEUE_PTRS();
	rte_smp_rmb();

	__RING_STAT_ADD(r, deq_success, n);
	r->cons.tail = cons_next;
	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
}
/** Enqueue several objects on the ring (multi-producers safe). */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/** Enqueue several objects on the ring (NOT multi-producers safe). */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/** Enqueue several objects; uses the SP or MP path depending on the ring's
 *  creation flags. Returns 0 on success, -EDQUOT if the watermark is
 *  exceeded, -ENOBUFS if not enough room. */
static inline int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		      unsigned n)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue_bulk(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_bulk(r, obj_table, n);
}
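/*
 * Usage sketch (illustrative, not part of the original header): bulk
 * enqueue is all-or-nothing, so the caller only has to distinguish
 * "queued" from "not queued". The batch size of 32 is arbitrary.
 */
static inline int
example_enqueue_batch(struct rte_ring *r, void **objs)
{
	int ret = rte_ring_enqueue_bulk(r, objs, 32);

	if (ret == -ENOBUFS)
		return -1;	/* nothing was enqueued: not enough room */
	/* ret is 0, or -EDQUOT if the ring rose above its watermark;
	 * in both cases all 32 objects are now in the ring */
	return 0;
}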
/** Enqueue one object on the ring (multi-producers safe). */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_mp_enqueue_bulk(r, &obj, 1);
}
/** Enqueue one object on the ring (NOT multi-producers safe). */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_sp_enqueue_bulk(r, &obj, 1);
}
/** Enqueue one object on the ring; picks the SP or MP path from the ring's
 *  creation flags. */
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue(r, obj);
	else
		return rte_ring_mp_enqueue(r, obj);
}
/** Dequeue several objects from the ring (multi-consumers safe). */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/** Dequeue several objects from the ring (NOT multi-consumers safe). */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/** Dequeue several objects; uses the SC or MC path depending on the ring's
 *  creation flags. Returns 0 on success, -ENOENT if not enough entries. */
static inline int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue_bulk(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_bulk(r, obj_table, n);
}
/** Dequeue one object from the ring (multi-consumers safe). */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
}
/** Dequeue one object from the ring (NOT multi-consumers safe). */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
}
/** Dequeue one object from the ring; picks the SC or MC path from the
 *  ring's creation flags. */
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue(r, obj_p);
	else
		return rte_ring_mc_dequeue(r, obj_p);
}
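/*
 * Usage sketch (illustrative, not part of the original header): a single
 * dequeue returns -ENOENT when the ring is empty, so the output pointer
 * is only valid on a 0 return.
 */
static inline void *
example_dequeue_one(struct rte_ring *r)
{
	void *obj = NULL;

	if (rte_ring_dequeue(r, &obj) < 0)
		return NULL;	/* ring was empty */
	return obj;
}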
/** Test if a ring is full. */
static inline int
rte_ring_full(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
}
/** Test if a ring is empty. */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return !!(cons_tail == prod_tail);
}
/** Return the number of entries in a ring. */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (prod_tail - cons_tail) & r->prod.mask;
}
/** Return the number of free entries in a ring. */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (cons_tail - prod_tail - 1) & r->prod.mask;
}
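/*
 * Usage sketch (illustrative, not part of the original header): count and
 * free_count are snapshots of the tail indices, so they are approximate
 * under concurrent producers/consumers, but they always add up to the
 * usable capacity, size - 1. The 80% threshold is an arbitrary choice for
 * this example.
 */
static inline int
example_ring_nearly_full(const struct rte_ring *r)
{
	unsigned used = rte_ring_count(r);
	unsigned capacity = used + rte_ring_free_count(r); /* == size - 1 */

	return used > (capacity * 4) / 5;
}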
/** Enqueue up to n objects on the ring (multi-producers safe); returns the
 *  number actually enqueued. */
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/** Enqueue up to n objects on the ring (NOT multi-producers safe); returns
 *  the number actually enqueued. */
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned n)
{
	return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/** Enqueue up to n objects; uses the SP or MP path depending on the ring's
 *  creation flags. */
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		       unsigned n)
{
	if (r->prod.sp_enqueue)
		return rte_ring_sp_enqueue_burst(r, obj_table, n);
	else
		return rte_ring_mp_enqueue_burst(r, obj_table, n);
}
/** Dequeue up to n objects from the ring (multi-consumers safe); returns
 *  the number actually dequeued. */
static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/** Dequeue up to n objects from the ring (NOT multi-consumers safe);
 *  returns the number actually dequeued. */
static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/** Dequeue up to n objects; uses the SC or MC path depending on the ring's
 *  creation flags. */
static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
{
	if (r->cons.sc_dequeue)
		return rte_ring_sc_dequeue_burst(r, obj_table, n);
	else
		return rte_ring_mc_dequeue_burst(r, obj_table, n);
}
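/*
 * Usage sketch (illustrative, not part of the original header): the burst
 * variants move "as many as possible" and return the count, which suits a
 * polling pipeline stage. The 32-object burst and the names are arbitrary.
 */
static inline unsigned
example_forward_burst(struct rte_ring *from, struct rte_ring *to)
{
	void *burst[32];
	unsigned n, done;

	n = rte_ring_dequeue_burst(from, burst, 32);
	if (n == 0)
		return 0;
	/* mask out RTE_RING_QUOT_EXCEED in case a watermark is set */
	done = rte_ring_enqueue_burst(to, burst, n) & RTE_RING_SZ_MASK;
	/* a real stage must keep or drop the n - done leftovers explicitly */
	return done;
}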