#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pause.h>
#define RTE_TAILQ_RING_NAME "RTE_RING"
enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* enqueue/dequeue a fixed number of items, or none */
	RTE_RING_QUEUE_VARIABLE   /* enqueue/dequeue as many items as possible */
};

#define RTE_RING_MZ_PREFIX "RG_"
/* The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
			   sizeof(RTE_RING_MZ_PREFIX) + 1)
/*
 * On targets with cache lines smaller than 128 bytes, pad the producer and
 * consumer metadata out to 128 bytes so that adjacent-line prefetching does
 * not pull both onto one core and cause false sharing.
 */
#if RTE_CACHE_LINE_SIZE < 128
#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#else
#define PROD_ALIGN RTE_CACHE_LINE_SIZE
#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif
/* Structure holding the head/tail index pair for one side of the ring. */
struct rte_ring_headtail {
	volatile uint32_t head;  /* producer/consumer head */
	volatile uint32_t tail;  /* producer/consumer tail */
	uint32_t single;         /* true if single producer/consumer */
};

/*
 * An rte_ring: the object slots are stored in memory immediately after this
 * structure, and the producer and consumer metadata sit on separate cache
 * lines so the two sides do not false-share.
 */
struct rte_ring {
	char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned; /* name of the ring */
	int flags;                         /* flags supplied at creation */
	const struct rte_memzone *memzone; /* memzone, if any, containing the ring */
	uint32_t size;                     /* size of the ring (a power of two) */
	uint32_t mask;                     /* mask (size - 1) of the ring */
	uint32_t capacity;                 /* usable number of slots */

	struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN); /* producer status */
	struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN); /* consumer status */
};
#define RING_F_SP_ENQ 0x0001 /* the default enqueue is "single-producer" */
#define RING_F_SC_DEQ 0x0002 /* the default dequeue is "single-consumer" */
/*
 * Ring is to hold exactly the requested number of entries. Without this
 * flag, the size must be a power of two and the usable space is that size
 * minus one; with it, the size is rounded up internally but the usable
 * space is exactly the count requested.
 */
#define RING_F_EXACT_SZ 0x0004
#define RTE_RING_SZ_MASK (0x7fffffffU) /* ring size mask */
/* Calculate the memory size needed to hold a ring with count slots. */
ssize_t rte_ring_get_memsize(unsigned count);

/* Initialize a ring in an already-allocated, suitably sized memory area. */
int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
		  unsigned flags);

/* Create a new ring named *name* in memory, on the given NUMA socket. */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags);

/* Free a ring previously created with rte_ring_create(). */
void rte_ring_free(struct rte_ring *r);

/* Dump the status of one ring, or of all configured rings, to a file. */
void rte_ring_dump(FILE *f, const struct rte_ring *r);
void rte_ring_list_dump(FILE *f);

/* Search for a ring by its name. */
struct rte_ring *rte_ring_lookup(const char *name);
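/*
 * Illustrative creation sketch (the ring name and size here are assumptions,
 * not mandated by this header); rte_errno is set on failure:
 *
 *	struct rte_ring *r = rte_ring_create("example_ring", 1024,
 *			SOCKET_ID_ANY, RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	if (r == NULL)
 *		... handle creation failure ...
 */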
/*
 * The actual enqueue of pointers on the ring. Placed here since identical
 * code is needed in both the single- and multi-producer enqueue functions.
 */
#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	const uint32_t size = (r)->size; \
	uint32_t idx = prod_head & (r)->mask; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		/* no wrap-around: copy four slots per iteration */ \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
			ring[idx] = obj_table[i]; \
			ring[idx + 1] = obj_table[i + 1]; \
			ring[idx + 2] = obj_table[i + 2]; \
			ring[idx + 3] = obj_table[i + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 2: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 1: \
			ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		/* wrap-around: copy up to the end, then restart at slot 0 */ \
		for (i = 0; idx < size; i++, idx++) \
			ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			ring[idx] = obj_table[i]; \
	} \
} while (0)
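/*
 * Worked example of the wrap-around branch above: with size = 8 and
 * mask = 7, an enqueue of n = 4 starting at prod_head = 6 gives idx = 6,
 * so idx + n = 10 is not < 8 and the else branch runs. The first loop
 * stores obj_table[0..1] into ring[6..7]; the second resets idx to 0 and
 * stores obj_table[2..3] into ring[0..1].
 */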
/*
 * The actual copy of pointers from the ring to obj_table. Placed here since
 * identical code is needed in both the single- and multi-consumer dequeue
 * functions.
 */
#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	uint32_t idx = cons_head & (r)->mask; \
	const uint32_t size = (r)->size; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
			obj_table[i] = ring[idx]; \
			obj_table[i + 1] = ring[idx + 1]; \
			obj_table[i + 2] = ring[idx + 2]; \
			obj_table[i + 3] = ring[idx + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 2: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 1: \
			obj_table[i++] = ring[idx++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = ring[idx]; \
	} \
} while (0)
static __rte_always_inline void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
		uint32_t single)
{
	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete before publishing our tail.
	 */
	if (!single)
		while (unlikely(ht->tail != old_val))
			rte_pause();

	ht->tail = new_val;
}
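/*
 * Why update_tail spins (illustrative scenario with two producers A and B):
 * A reserves slots [h, h+4) and B then reserves [h+4, h+8). If B finishes
 * copying first, it must not publish tail = h+8 while A is still writing,
 * or a consumer could read A's unfinished slots. B therefore waits until
 * tail == h+4 (A's commit) before advancing it to h+8.
 */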
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t capacity = r->capacity;
	unsigned int max = n;
	int success;

	do {
		/* Reset n to the initial burst count */
		n = max;

		*old_head = r->prod.head;

		/*
		 * The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits, even if *old_head >
		 * cons.tail), so free_entries is always between 0 and capacity.
		 */
		*free_entries = (capacity + r->cons.tail - *old_head);

		/* check that we have enough room in the ring */
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp)
			r->prod.head = *new_head, success = 1;
		else
			success = rte_atomic32_cmpset(&r->prod.head,
					*old_head, *new_head);
	} while (unlikely(success == 0));
	return n;
}
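/*
 * Arithmetic sketch for *free_entries above: head and tail are free-running
 * uint32_t counters, so the computation stays correct across 2^32 wrap.
 * For example, capacity = 1023, cons.tail = 10 and prod.head = 1000 give
 * 1023 + 10 - 1000 = 33 free slots, and the same modular result holds when
 * prod.head has wrapped past zero while cons.tail has not yet.
 */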
static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		unsigned int is_sp, unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	/* reserve slots, copy the objects, then publish the new tail */
	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
	rte_smp_wmb();

	update_tail(&r->prod, prod_head, prod_next, is_sp);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	int success;

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = max;

		*old_head = r->cons.head;

		/*
		 * The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits, even if *old_head >
		 * prod.tail), so entries is always between 0 and size - 1.
		 */
		*entries = (r->prod.tail - *old_head);

		/* Set the actual number of entries for the dequeue */
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc)
			r->cons.head = *new_head, success = 1;
		else
			success = rte_atomic32_cmpset(&r->cons.head,
					*old_head, *new_head);
	} while (unlikely(success == 0));
	return n;
}
static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		unsigned int is_sc, unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	/* reserve entries, copy them out, then publish the new tail */
	n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
	rte_smp_rmb();

	update_tail(&r->cons, cons_head, cons_next, is_sc);
end:
	if (available != NULL)
		*available = entries - n;
	return n;
}
/* Convenience constants for the is_sp/is_sc arguments. */
#define __IS_SP 1
#define __IS_MP 0
#define __IS_SC 1
#define __IS_MC 0

/*
 * Enqueue several objects on the ring (multi-producer safe). Returns the
 * number of objects enqueued: either n or 0.
 */
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MP, free_space);
}
/* Enqueue several objects on the ring (not multi-producer safe). */
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SP, free_space);
}
/* Enqueue several objects, using the ring's default synchronization mode. */
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		      unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->prod.single, free_space);
}

/* Single-object wrappers: 0 on success, -ENOBUFS if the ring is full. */
static __rte_always_inline int
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

static __rte_always_inline int
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

static __rte_always_inline int
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}
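/*
 * Illustrative producer-side sketch ("objs" and its size are assumptions):
 * bulk enqueue is all-or-nothing, so a return of 0 means the ring did not
 * have 8 free slots and nothing was written.
 *
 *	void *objs[8];
 *	... fill objs ...
 *	if (rte_ring_enqueue_bulk(r, objs, 8, NULL) == 0)
 *		... handle a full ring: retry later or drop ...
 */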
/* Dequeue several objects from the ring (multi-consumer safe). */
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
			 unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MC, available);
}
/* Dequeue several objects from the ring (not multi-consumer safe). */
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
			 unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SC, available);
}
/* Dequeue several objects, using the ring's default synchronization mode. */
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
		      unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->cons.single, available);
}

/* Single-object wrappers: 0 on success, -ENOENT if the ring is empty. */
static __rte_always_inline int
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

static __rte_always_inline int
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

static __rte_always_inline int
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/* Return the number of entries currently in the ring. */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	uint32_t count = (prod_tail - cons_tail) & r->mask;
	return (count > r->capacity) ? r->capacity : count;
}
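/*
 * Worked sizing example for the clamp above: a ring created with
 * count = 1024 and no RING_F_EXACT_SZ has size = 1024, mask = 1023 and
 * capacity = 1023 (one slot is kept unused so a full ring can be told
 * apart from an empty one). With RING_F_EXACT_SZ the ring is sized up to
 * a larger power of two internally so that capacity equals the requested
 * count, which is why the count is clamped to r->capacity rather than
 * derived from the mask alone.
 */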
/* Return the number of free slots in the ring. */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	return r->capacity - rte_ring_count(r);
}

/* Test if the ring is full. */
static inline int
rte_ring_full(const struct rte_ring *r)
{
	return rte_ring_free_count(r) == 0;
}

/* Test if the ring is empty. */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
	return rte_ring_count(r) == 0;
}

/* Return the raw size of the ring (a power of two). */
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
	return r->size;
}

/* Return the usable capacity of the ring. */
static inline unsigned int
rte_ring_get_capacity(const struct rte_ring *r)
{
	return r->capacity;
}
/*
 * Burst variants: enqueue/dequeue up to n objects and return the number
 * actually transferred, which may be anywhere from 0 to n.
 */
static __rte_always_inline unsigned
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}
static __rte_always_inline unsigned
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}
static __rte_always_inline unsigned
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		       unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
			r->prod.single, free_space);
}
static __rte_always_inline unsigned
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
			  unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}
static __rte_always_inline unsigned
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
			  unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}
static __rte_always_inline unsigned
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
		       unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE,
			r->cons.single, available);
}
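/*
 * Illustrative consumer-side sketch ("process" is an assumed callback, not
 * part of this header): the burst variant returns however many objects were
 * available, up to the requested 32.
 *
 *	void *burst[32];
 *	unsigned int nb = rte_ring_dequeue_burst(r, burst, 32, NULL);
 *	for (unsigned int k = 0; k < nb; k++)
 *		process(burst[k]);
 */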