96 #include <sys/queue.h>
104 #define RTE_TAILQ_RING_NAME "RTE_RING"
/*
 * Enqueue/dequeue behavior selector (enum closer is missing from this
 * excerpt). From the helper code below: FIXED means "transfer exactly n
 * objects or fail" (success returns 0), VARIABLE means "transfer as many
 * as possible" (success returns the count actually moved).
 */
106 enum rte_ring_queue_behavior {
107 RTE_RING_QUEUE_FIXED = 0,
108 RTE_RING_QUEUE_VARIABLE
/*
 * Per-lcore debug counters, compiled in only when RTE_LIBRTE_RING_DEBUG
 * is defined (struct closer and #endif are missing from this excerpt).
 * "_bulk" fields count operations, "_objs" fields count objects; the
 * success/quota/fail split mirrors the __RING_STAT_ADD call sites in
 * the enqueue/dequeue helpers.
 */
111 #ifdef RTE_LIBRTE_RING_DEBUG
115 struct rte_ring_debug_stats {
116 uint64_t enq_success_bulk;
117 uint64_t enq_success_objs;
/* "quota" = enqueue succeeded but crossed the configured watermark. */
118 uint64_t enq_quota_bulk;
119 uint64_t enq_quota_objs;
120 uint64_t enq_fail_bulk;
121 uint64_t enq_fail_objs;
122 uint64_t deq_success_bulk;
123 uint64_t deq_success_objs;
124 uint64_t deq_fail_bulk;
125 uint64_t deq_fail_objs;
/* Maximum ring name length (presumably including the trailing '\0' —
 * confirm against the full header). */
129 #define RTE_RING_NAMESIZE 32
/* Name prefix for the memory region backing a ring ("MZ" presumably
 * stands for memzone — confirm against the full header). */
130 #define RTE_RING_MZ_PREFIX "RG_"
/* Build-time overridable (#ifndef guard): repetitions between pauses in
 * the tail-wait loops; #endif is missing from this excerpt. */
132 #ifndef RTE_RING_PAUSE_REP_COUNT
133 #define RTE_RING_PAUSE_REP_COUNT 0
/*
 * Fragment of the ring structure: producer and consumer bookkeeping are
 * kept in separate cache-line-aligned anonymous structs so the two sides
 * do not false-share a cache line (the members themselves — head, tail,
 * mask, size, watermark, sp_enqueue/sc_dequeue — are referenced by the
 * helpers below but their declarations are missing from this excerpt).
 * RTE_RING_SPLIT_PROD_CONS presumably inserts extra padding before
 * "cons" — the guarded lines are not visible here.
 */
163 }
prod __rte_cache_aligned;
172 #ifdef RTE_RING_SPLIT_PROD_CONS
173 }
cons __rte_cache_aligned;
/* Per-lcore stats array, debug builds only (see __RING_STAT_ADD). */
178 #ifdef RTE_LIBRTE_RING_DEBUG
179 struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
/* Creation flag: default enqueue is single-producer (checked at runtime
 * via r->prod.sp_enqueue in the generic enqueue wrappers). */
187 #define RING_F_SP_ENQ 0x0001
/* Creation flag: default dequeue is single-consumer (r->cons.sc_dequeue). */
188 #define RING_F_SC_DEQ 0x0002
/* OR'd into an enqueue return value when the watermark was exceeded.
 * NOTE(review): (1 << 31) left-shifts into the sign bit of int, which is
 * undefined behavior in C — consider (1U << 31). */
189 #define RTE_RING_QUOT_EXCEED (1 << 31)
/* Ring sizes/counts are limited to 28 bits by this mask. */
190 #define RTE_RING_SZ_MASK (unsigned)(0x0fffffff)
/*
 * Debug-stats accumulator: bumps the calling lcore's name##_objs counter
 * by n and name##_bulk by 1, only when the thread has a valid lcore id.
 * Expands to a no-op do/while(0) when RTE_LIBRTE_RING_DEBUG is not
 * defined (the closing "} while(0)" of the debug variant and the
 * #else/#endif lines are missing from this excerpt).
 */
201 #ifdef RTE_LIBRTE_RING_DEBUG
202 #define __RING_STAT_ADD(r, name, n) do { \
203 unsigned __lcore_id = rte_lcore_id(); \
204 if (__lcore_id < RTE_MAX_LCORE) { \
205 r->stats[__lcore_id].name##_objs += n; \
206 r->stats[__lcore_id].name##_bulk += 1; \
210 #define __RING_STAT_ADD(r, name, n) do {} while(0)
/* Tail of a ring creation/declaration prototype — the function name and
 * leading parameters fall outside this excerpt (presumably
 * rte_ring_create; confirm against the full header). */
306 int socket_id,
unsigned flags);
/*
 * Copy n object pointers from obj_table[] into r->ring[] starting at
 * prod_head & mask. Fast path (no wrap past the array end): unrolled
 * 4-at-a-time copy, with a fallthrough switch handling the 0-3 leftover
 * elements (the "switch (n & 0x3)" header and the macro closer are
 * missing from this excerpt). Slow path: copy in two segments around
 * the wrap point. Expands in-place and relies on r, prod_head, mask, n,
 * obj_table and i being in scope at the call site.
 */
348 #define ENQUEUE_PTRS() do { \
349 const uint32_t size = r->prod.size; \
350 uint32_t idx = prod_head & mask; \
351 if (likely(idx + n < size)) { \
352 for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
353 r->ring[idx] = obj_table[i]; \
354 r->ring[idx+1] = obj_table[i+1]; \
355 r->ring[idx+2] = obj_table[i+2]; \
356 r->ring[idx+3] = obj_table[i+3]; \
359 case 3: r->ring[idx++] = obj_table[i++]; \
360 case 2: r->ring[idx++] = obj_table[i++]; \
361 case 1: r->ring[idx++] = obj_table[i++]; \
364 for (i = 0; idx < size; i++, idx++)\
365 r->ring[idx] = obj_table[i]; \
366 for (idx = 0; i < n; i++, idx++) \
367 r->ring[idx] = obj_table[i]; \
/*
 * Mirror image of ENQUEUE_PTRS: copy n object pointers from r->ring[]
 * (starting at cons_head & mask) out into obj_table[]. Same structure —
 * unrolled fast path with a fallthrough remainder switch (switch header
 * and macro closer missing from this excerpt), two-segment copy when
 * the read range wraps. Relies on r, cons_head, mask, n, obj_table and
 * i being in scope at the expansion site.
 */
374 #define DEQUEUE_PTRS() do { \
375 uint32_t idx = cons_head & mask; \
376 const uint32_t size = r->cons.size; \
377 if (likely(idx + n < size)) { \
378 for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
379 obj_table[i] = r->ring[idx]; \
380 obj_table[i+1] = r->ring[idx+1]; \
381 obj_table[i+2] = r->ring[idx+2]; \
382 obj_table[i+3] = r->ring[idx+3]; \
385 case 3: obj_table[i++] = r->ring[idx++]; \
386 case 2: obj_table[i++] = r->ring[idx++]; \
387 case 1: obj_table[i++] = r->ring[idx++]; \
390 for (i = 0; idx < size; i++, idx++) \
391 obj_table[i] = r->ring[idx]; \
392 for (idx = 0; i < n; i++, idx++) \
393 obj_table[i] = r->ring[idx]; \
/*
 * Multi-producer enqueue core (fragmented excerpt — the compare-and-swap
 * head-reservation loop and several branches are missing from this view).
 * From the visible code: FIXED behavior returns 0 on success or -EDQUOT
 * when the watermark is crossed; VARIABLE returns the object count.
 */
422 static inline int __attribute__((always_inline))
423 __rte_ring_mp_do_enqueue(struct
rte_ring *r,
void * const *obj_table,
424 unsigned n, enum rte_ring_queue_behavior behavior)
426 uint32_t prod_head, prod_next;
427 uint32_t cons_tail, free_entries;
/* Caller's original request, so a VARIABLE retry can shrink n later. */
428 const unsigned max = n;
431 uint32_t mask = r->prod.mask;
/* Snapshot producer head and consumer tail. */
444 prod_head = r->prod.head;
445 cons_tail = r->cons.tail;
/*
 * Free slots = mask + cons_tail - prod_head: one slot of the
 * (mask + 1)-entry array is always left unused so that full and empty
 * states are distinguishable. Unsigned wraparound is intentional.
 */
450 free_entries = (mask + cons_tail - prod_head);
/* Insufficient room: FIXED fails outright; VARIABLE path (lines not
 * visible here) presumably retries with the smaller available count. */
454 if (behavior == RTE_RING_QUEUE_FIXED) {
455 __RING_STAT_ADD(r, enq_fail, n);
461 __RING_STAT_ADD(r, enq_fail, n);
469 prod_next = prod_head + n;
/* Occupancy after this enqueue vs. the configured watermark. */
479 if (
unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
480 ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
482 __RING_STAT_ADD(r, enq_quota, n);
485 ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
486 __RING_STAT_ADD(r, enq_success, n);
/*
 * Wait until earlier concurrent producers have published their slots
 * before advancing the shared tail — presumably the MP ordering
 * protocol; the loop body (pause/yield) is not visible here.
 */
493 while (
unlikely(r->prod.tail != prod_head)) {
505 r->prod.tail = prod_next;
/*
 * Single-producer enqueue core (fragmented excerpt). Same contract as
 * the MP variant — FIXED returns 0/-EDQUOT, VARIABLE returns the count —
 * but with no atomic head reservation and no tail-wait: the sole
 * producer updates prod.head and prod.tail with plain stores.
 */
531 static inline int __attribute__((always_inline))
532 __rte_ring_sp_do_enqueue(struct
rte_ring *r,
void * const *obj_table,
533 unsigned n, enum rte_ring_queue_behavior behavior)
535 uint32_t prod_head, cons_tail;
536 uint32_t prod_next, free_entries;
538 uint32_t mask = r->prod.mask;
/* Snapshot head/tail; no CAS needed with a single producer. */
541 prod_head = r->prod.head;
542 cons_tail = r->cons.tail;
/* One slot is always kept unused; unsigned wraparound intentional. */
547 free_entries = mask + cons_tail - prod_head;
/* Insufficient room: FIXED fails; VARIABLE path not visible here. */
551 if (behavior == RTE_RING_QUEUE_FIXED) {
552 __RING_STAT_ADD(r, enq_fail, n);
558 __RING_STAT_ADD(r, enq_fail, n);
/* Reserve the slots with a plain store (single producer). */
566 prod_next = prod_head + n;
567 r->prod.head = prod_next;
/* Occupancy after this enqueue vs. the configured watermark. */
574 if (
unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
575 ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
577 __RING_STAT_ADD(r, enq_quota, n);
580 ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
581 __RING_STAT_ADD(r, enq_success, n);
/* Publish: no wait loop needed, there are no concurrent producers. */
584 r->prod.tail = prod_next;
/*
 * Multi-consumer dequeue core (fragmented excerpt — the CAS-based head
 * reservation and the DEQUEUE_PTRS copy are missing from this view).
 * FIXED returns 0 on success; VARIABLE returns the object count.
 */
615 static inline int __attribute__((always_inline))
616 __rte_ring_mc_do_dequeue(struct
rte_ring *r,
void **obj_table,
617 unsigned n, enum rte_ring_queue_behavior behavior)
619 uint32_t cons_head, prod_tail;
620 uint32_t cons_next, entries;
/* Caller's original request, for the VARIABLE shrink-and-retry path. */
621 const unsigned max = n;
624 uint32_t mask = r->prod.mask;
/* Snapshot consumer head and producer tail. */
636 cons_head = r->cons.head;
637 prod_tail = r->prod.tail;
/* Available objects; unsigned wraparound intentional. */
642 entries = (prod_tail - cons_head);
/* Not enough entries: FIXED fails; VARIABLE path not visible here. */
646 if (behavior == RTE_RING_QUEUE_FIXED) {
647 __RING_STAT_ADD(r, deq_fail, n);
652 __RING_STAT_ADD(r, deq_fail, n);
660 cons_next = cons_head + n;
/*
 * Wait for earlier concurrent consumers to publish before advancing
 * the shared tail — presumably the MC ordering protocol; the loop body
 * is not visible here.
 */
673 while (
unlikely(r->cons.tail != cons_head)) {
685 __RING_STAT_ADD(r, deq_success, n);
686 r->cons.tail = cons_next;
688 return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
/*
 * Single-consumer dequeue core (fragmented excerpt). Same contract as
 * the MC variant but with plain stores on cons.head/cons.tail and no
 * tail-wait loop, since there is only one consumer.
 */
714 static inline int __attribute__((always_inline))
715 __rte_ring_sc_do_dequeue(struct
rte_ring *r,
void **obj_table,
716 unsigned n, enum rte_ring_queue_behavior behavior)
718 uint32_t cons_head, prod_tail;
719 uint32_t cons_next, entries;
721 uint32_t mask = r->prod.mask;
/* Snapshot head/tail; no CAS needed with a single consumer. */
723 cons_head = r->cons.head;
724 prod_tail = r->prod.tail;
/* Available objects; unsigned wraparound intentional. */
729 entries = prod_tail - cons_head;
/* Not enough entries: FIXED fails; VARIABLE path not visible here. */
732 if (behavior == RTE_RING_QUEUE_FIXED) {
733 __RING_STAT_ADD(r, deq_fail, n);
738 __RING_STAT_ADD(r, deq_fail, n);
/* Reserve with a plain store (single consumer). */
746 cons_next = cons_head + n;
747 r->cons.head = cons_next;
753 __RING_STAT_ADD(r, deq_success, n);
/* Publish: no wait loop needed, there are no concurrent consumers. */
754 r->cons.tail = cons_next;
755 return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
/*
 * Fixed-count (bulk and single-object) wrapper fragments. Only attribute
 * lines and a few body lines survive in this excerpt; the visible calls
 * forward to the __rte_ring_{mp,sp}_do_enqueue / {mc,sc}_do_dequeue
 * cores with RTE_RING_QUEUE_FIXED, and the generic variants dispatch at
 * runtime on r->prod.sp_enqueue / r->cons.sc_dequeue (set from the
 * RING_F_SP_ENQ / RING_F_SC_DEQ creation flags — confirm against the
 * full header; signatures and names are not visible here).
 */
776 static inline int __attribute__((always_inline))
780 return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
798 static inline int __attribute__((always_inline))
802 return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
824 static inline int __attribute__((always_inline))
/* Generic bulk enqueue: runtime dispatch on the single-producer flag. */
828 if (r->prod.sp_enqueue)
850 static inline int __attribute__((always_inline))
869 static inline int __attribute__((always_inline))
892 static inline int __attribute__((always_inline))
/* Generic single-object enqueue: same runtime dispatch. */
895 if (r->prod.sp_enqueue)
918 static inline int __attribute__((always_inline))
921 return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
939 static inline int __attribute__((always_inline))
942 return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
963 static inline int __attribute__((always_inline))
/* Generic bulk dequeue: runtime dispatch on the single-consumer flag. */
966 if (r->cons.sc_dequeue)
987 static inline int __attribute__((always_inline))
1005 static inline int __attribute__((always_inline))
1027 static inline int __attribute__((always_inline))
/* Generic single-object dequeue: same runtime dispatch. */
1030 if (r->cons.sc_dequeue)
/* Body fragment (signature not in this excerpt — presumably a ring-full
 * test): true when exactly one slot separates consumer and producer
 * tails, i.e. the single always-reserved gap slot is all that remains. */
1048 uint32_t prod_tail = r->prod.tail;
1049 uint32_t cons_tail = r->cons.
tail;
1050 return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
/* Body fragment (signature not in this excerpt — presumably a ring-empty
 * test): true when consumer and producer tails coincide. */
1065 uint32_t prod_tail = r->prod.tail;
1066 uint32_t cons_tail = r->cons.
tail;
1067 return !!(cons_tail == prod_tail);
/* Fragment (parameter list not in this excerpt — presumably the entry
 * count): number of objects currently in the ring, computed from the
 * tail snapshot; unsigned wraparound of the subtraction is intentional. */
1078 static inline unsigned
1081 uint32_t prod_tail = r->prod.tail;
1082 uint32_t cons_tail = r->cons.
tail;
1083 return (prod_tail - cons_tail) & r->prod.mask;
/* Fragment (parameter list not in this excerpt — presumably the free
 * count): free slots remaining, excluding the one always-reserved gap
 * slot (hence the "- 1"). */
1094 static inline unsigned
1097 uint32_t prod_tail = r->prod.tail;
1098 uint32_t cons_tail = r->cons.
tail;
1099 return (cons_tail - prod_tail - 1) & r->prod.mask;
/*
 * Burst (variable-count) wrapper fragments: same structure as the
 * fixed-count wrappers, but forwarding RTE_RING_QUEUE_VARIABLE so each
 * call returns the number of objects actually transferred. Signatures
 * and names are not visible in this excerpt; the generic variants
 * dispatch on r->prod.sp_enqueue / r->cons.sc_dequeue at runtime.
 */
1137 static inline unsigned __attribute__((always_inline))
1141 return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1156 static inline unsigned __attribute__((always_inline))
1160 return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1179 static inline unsigned __attribute__((always_inline))
/* Generic burst enqueue: runtime dispatch on the single-producer flag. */
1183 if (r->prod.sp_enqueue)
1206 static inline unsigned __attribute__((always_inline))
1209 return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1226 static inline unsigned __attribute__((always_inline))
1229 return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
1248 static inline unsigned __attribute__((always_inline))
/* Generic burst dequeue: runtime dispatch on the single-consumer flag. */
1251 if (r->cons.sc_dequeue)