DPDK 18.11.11
rte_ring.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2010-2017 Intel Corporation
4  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
5  * All rights reserved.
6  * Derived from FreeBSD's bufring.h
7  * Used as BSD-3 Licensed with permission from Kip Macy.
8  */
9 
10 #ifndef _RTE_RING_H_
11 #define _RTE_RING_H_
12 
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38 
39 #include <stdio.h>
40 #include <stdint.h>
41 #include <sys/queue.h>
42 #include <errno.h>
43 #include <rte_common.h>
44 #include <rte_config.h>
45 #include <rte_memory.h>
46 #include <rte_lcore.h>
47 #include <rte_atomic.h>
48 #include <rte_branch_prediction.h>
49 #include <rte_memzone.h>
50 #include <rte_pause.h>
51 
52 #define RTE_TAILQ_RING_NAME "RTE_RING"
53 
54 enum rte_ring_queue_behavior {
55  RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
56  RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
57 };
58 
59 #define RTE_RING_MZ_PREFIX "RG_"
60 
61 #define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
62  sizeof(RTE_RING_MZ_PREFIX) + 1)
63 
64 struct rte_memzone; /* forward declaration, so as not to require memzone.h */
65 
66 /* structure to hold a pair of head/tail values and other metadata */
67 struct rte_ring_headtail {
68  volatile uint32_t head;
69  volatile uint32_t tail;
70  uint32_t single;
71 };
72 
83 struct rte_ring {
84  /*
85  * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
86  * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
87  * next time the ABI changes
88  */
89  char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned;
90  int flags;
91  const struct rte_memzone *memzone;
93  uint32_t size;
94  uint32_t mask;
95  uint32_t capacity;
97  char pad0 __rte_cache_aligned;
100  struct rte_ring_headtail prod __rte_cache_aligned;
101  char pad1 __rte_cache_aligned;
104  struct rte_ring_headtail cons __rte_cache_aligned;
105  char pad2 __rte_cache_aligned;
106 };
107 
108 #define RING_F_SP_ENQ 0x0001
109 #define RING_F_SC_DEQ 0x0002
118 #define RING_F_EXACT_SZ 0x0004
119 #define RTE_RING_SZ_MASK (0x7fffffffU)
121 /* @internal defines for passing to the enqueue/dequeue worker functions */
122 #define __IS_SP 1
123 #define __IS_MP 0
124 #define __IS_SC 1
125 #define __IS_MC 0
126 
141 ssize_t rte_ring_get_memsize(unsigned count);
142 
177 int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
178  unsigned flags);
179 
219 struct rte_ring *rte_ring_create(const char *name, unsigned count,
220  int socket_id, unsigned flags);
227 void rte_ring_free(struct rte_ring *r);
228 
237 void rte_ring_dump(FILE *f, const struct rte_ring *r);
238 
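Usage sketch (not part of the header): after rte_eal_init() a ring is normally created with rte_ring_create(), with the flags defined above selecting single- or multi-producer/consumer behaviour. The ring name, element count and error handling below are illustrative assumptions, not taken from this file.

#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_lcore.h>

static struct rte_ring *
example_ring_create(void)
{
	/* Illustrative: SP/SC ring with 1024 pointer slots on the caller's socket.
	 * Without RING_F_EXACT_SZ the count must be a power of two and the
	 * usable capacity is count - 1 (here 1023). */
	struct rte_ring *r = rte_ring_create("example_ring", 1024,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		printf("ring create failed: %s\n", rte_strerror(rte_errno));
	return r;
}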
239 /* The actual enqueue of pointers on the ring.
240  * Placed here since identical code is needed in both the
241  * single- and multi-producer enqueue functions. */
242 #define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
243  unsigned int i; \
244  const uint32_t size = (r)->size; \
245  uint32_t idx = prod_head & (r)->mask; \
246  obj_type *ring = (obj_type *)ring_start; \
247  if (likely(idx + n < size)) { \
248  for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
249  ring[idx] = obj_table[i]; \
250  ring[idx+1] = obj_table[i+1]; \
251  ring[idx+2] = obj_table[i+2]; \
252  ring[idx+3] = obj_table[i+3]; \
253  } \
254  switch (n & 0x3) { \
255  case 3: \
256  ring[idx++] = obj_table[i++]; /* fallthrough */ \
257  case 2: \
258  ring[idx++] = obj_table[i++]; /* fallthrough */ \
259  case 1: \
260  ring[idx++] = obj_table[i++]; \
261  } \
262  } else { \
263  for (i = 0; idx < size; i++, idx++)\
264  ring[idx] = obj_table[i]; \
265  for (idx = 0; i < n; i++, idx++) \
266  ring[idx] = obj_table[i]; \
267  } \
268 } while (0)
269 
270 /* The actual copy of pointers from the ring to obj_table.
271  * Placed here since identical code is needed in both the
272  * single- and multi-consumer dequeue functions. */
273 #define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
274  unsigned int i; \
275  uint32_t idx = cons_head & (r)->mask; \
276  const uint32_t size = (r)->size; \
277  obj_type *ring = (obj_type *)ring_start; \
278  if (likely(idx + n < size)) { \
279  for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
280  obj_table[i] = ring[idx]; \
281  obj_table[i+1] = ring[idx+1]; \
282  obj_table[i+2] = ring[idx+2]; \
283  obj_table[i+3] = ring[idx+3]; \
284  } \
285  switch (n & 0x3) { \
286  case 3: \
287  obj_table[i++] = ring[idx++]; /* fallthrough */ \
288  case 2: \
289  obj_table[i++] = ring[idx++]; /* fallthrough */ \
290  case 1: \
291  obj_table[i++] = ring[idx++]; \
292  } \
293  } else { \
294  for (i = 0; idx < size; i++, idx++) \
295  obj_table[i] = ring[idx]; \
296  for (idx = 0; i < n; i++, idx++) \
297  obj_table[i] = ring[idx]; \
298  } \
299 } while (0)
300 
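Both macros turn a free-running 32-bit head index into a slot index with head & mask; because the internal size is a power of two the AND is a cheap modulo, and the else branch handles copies that wrap past the end of the array. A minimal standalone sketch of that index arithmetic (the values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t size = 8, mask = size - 1;	/* power-of-two ring size */
	uint32_t head = 6;				/* free-running producer head */
	unsigned int i;

	for (i = 0; i < 4; i++)		/* writing 4 objects starting at slot 6 */
		printf("obj %u -> slot %u\n", i, (head + i) & mask);	/* slots 6, 7, 0, 1 */
	return 0;
}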
301 /* On weakly ordered CPUs (PowerPC/Arm) one load may be reordered with
302  * respect to another load, so a barrier is needed between them.
303  * There are 2 choices for the users:
304  * 1. use an rmb() memory barrier (the default, rte_ring_generic.h), or
305  * 2. use one-direction load_acquire/store_release barriers, selected by
306  * CONFIG_RTE_USE_C11_MEM_MODEL=y (rte_ring_c11_mem.h).
307  * Which is faster depends on performance test results for the target.
308  * By default, the common functions come from rte_ring_generic.h.
309  */
310 #ifdef RTE_USE_C11_MEM_MODEL
311 #include "rte_ring_c11_mem.h"
312 #else
313 #include "rte_ring_generic.h"
314 #endif
315 
336 static __rte_always_inline unsigned int
337 __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
338  unsigned int n, enum rte_ring_queue_behavior behavior,
339  unsigned int is_sp, unsigned int *free_space)
340 {
341  uint32_t prod_head, prod_next;
342  uint32_t free_entries;
343 
344  n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
345  &prod_head, &prod_next, &free_entries);
346  if (n == 0)
347  goto end;
348 
349  ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
350 
351  update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
352 end:
353  if (free_space != NULL)
354  *free_space = free_entries - n;
355  return n;
356 }
357 
378 static __rte_always_inline unsigned int
379 __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
380  unsigned int n, enum rte_ring_queue_behavior behavior,
381  unsigned int is_sc, unsigned int *available)
382 {
383  uint32_t cons_head, cons_next;
384  uint32_t entries;
385 
386  n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
387  &cons_head, &cons_next, &entries);
388  if (n == 0)
389  goto end;
390 
391  DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
392 
393  update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
394 
395 end:
396  if (available != NULL)
397  *available = entries - n;
398  return n;
399 }
400 
419 static __rte_always_inline unsigned int
420 rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
421  unsigned int n, unsigned int *free_space)
422 {
423  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
424  __IS_MP, free_space);
425 }
426 
442 static __rte_always_inline unsigned int
443 rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
444  unsigned int n, unsigned int *free_space)
445 {
446  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
447  __IS_SP, free_space);
448 }
449 
469 static __rte_always_inline unsigned int
470 rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
471  unsigned int n, unsigned int *free_space)
472 {
473  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
474  r->prod.single, free_space);
475 }
476 
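rte_ring_enqueue_bulk() dispatches on r->prod.single (set from RING_F_SP_ENQ at creation time) and, because it uses RTE_RING_QUEUE_FIXED, enqueues either all n objects or none. A hedged usage sketch; the object pointers and count are placeholders:

#include <rte_ring.h>

static int
example_enqueue_bulk(struct rte_ring *r, void *objs[4])
{
	unsigned int free_space;

	/* Returns 4 on success, 0 if fewer than 4 slots were free. */
	if (rte_ring_enqueue_bulk(r, objs, 4, &free_space) == 0)
		return -ENOBUFS;	/* nothing was enqueued */
	return 0;
}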
491 static __rte_always_inline int
492 rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
493 {
494  return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
495 }
496 
508 static __rte_always_inline int
509 rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
510 {
511  return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
512 }
513 
529 static __rte_always_inline int
530 rte_ring_enqueue(struct rte_ring *r, void *obj)
531 {
532  return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
533 }
534 
553 static __rte_always_inline unsigned int
554 rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
555  unsigned int n, unsigned int *available)
556 {
557  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
558  __IS_MC, available);
559 }
560 
577 static __rte_always_inline unsigned int
578 rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
579  unsigned int n, unsigned int *available)
580 {
581  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
582  __IS_SC, available);
583 }
584 
604 static __rte_always_inline unsigned int
605 rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
606  unsigned int *available)
607 {
608  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
609  r->cons.single, available);
610 }
611 
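The dequeue side mirrors this: with fixed behaviour either all n objects are taken or none, and *available reports how many entries remain afterwards. A brief illustrative sketch:

#include <rte_ring.h>

static int
example_dequeue_bulk(struct rte_ring *r, void *out[4])
{
	unsigned int avail;

	if (rte_ring_dequeue_bulk(r, out, 4, &avail) == 0)
		return -ENOENT;		/* fewer than 4 entries were present */
	/* out[0..3] now hold the dequeued pointers; 'avail' entries remain. */
	return 0;
}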
627 static __rte_always_inline int
628 rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
629 {
630  return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
631 }
632 
645 static __rte_always_inline int
646 rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
647 {
648  return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
649 }
650 
667 static __rte_always_inline int
668 rte_ring_dequeue(struct rte_ring *r, void **obj_p)
669 {
670  return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
671 }
672 
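The single-object wrappers map the bulk result onto 0 on success, -ENOBUFS when the ring is full (enqueue) and -ENOENT when it is empty (dequeue). An illustrative round trip:

#include <rte_ring.h>

static int
example_pass_one(struct rte_ring *r, void *obj)
{
	void *out = NULL;

	if (rte_ring_enqueue(r, obj) != 0)
		return -ENOBUFS;	/* ring was full */
	if (rte_ring_dequeue(r, &out) != 0)
		return -ENOENT;		/* ring was empty */
	return out == obj ? 0 : -1;
}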
681 static inline unsigned
682 rte_ring_count(const struct rte_ring *r)
683 {
684  uint32_t prod_tail = r->prod.tail;
685  uint32_t cons_tail = r->cons.tail;
686  uint32_t count = (prod_tail - cons_tail) & r->mask;
687  return (count > r->capacity) ? r->capacity : count;
688 }
689 
698 static inline unsigned
699 rte_ring_free_count(const struct rte_ring *r)
700 {
701  return r->capacity - rte_ring_count(r);
702 }
703 
713 static inline int
714 rte_ring_full(const struct rte_ring *r)
715 {
716  return rte_ring_free_count(r) == 0;
717 }
718 
728 static inline int
729 rte_ring_empty(const struct rte_ring *r)
730 {
731  return rte_ring_count(r) == 0;
732 }
733 
744 static inline unsigned int
745 rte_ring_get_size(const struct rte_ring *r)
746 {
747  return r->size;
748 }
749 
758 static inline unsigned int
759 rte_ring_get_capacity(const struct rte_ring *r)
760 {
761  return r->capacity;
762 }
763 
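These accessors are only snapshots when producers or consumers are running concurrently; capacity is the usable number of entries, while size is the internal power-of-two size. A small illustrative dump:

#include <stdio.h>
#include <rte_ring.h>

static void
example_print_counters(const struct rte_ring *r)
{
	printf("size=%u capacity=%u used=%u free=%u full=%d empty=%d\n",
	       rte_ring_get_size(r), rte_ring_get_capacity(r),
	       rte_ring_count(r), rte_ring_free_count(r),
	       rte_ring_full(r), rte_ring_empty(r));
}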
770 void rte_ring_list_dump(FILE *f);
771 
782 struct rte_ring *rte_ring_lookup(const char *name);
783 
802 static __rte_always_inline unsigned
803 rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
804  unsigned int n, unsigned int *free_space)
805 {
806  return __rte_ring_do_enqueue(r, obj_table, n,
807  RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
808 }
809 
825 static __rte_always_inline unsigned
826 rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
827  unsigned int n, unsigned int *free_space)
828 {
829  return __rte_ring_do_enqueue(r, obj_table, n,
830  RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
831 }
832 
852 static __rte_always_inline unsigned
853 rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
854  unsigned int n, unsigned int *free_space)
855 {
856  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
857  r->prod.single, free_space);
858 }
859 
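The burst variants use RTE_RING_QUEUE_VARIABLE: they enqueue as many of the n objects as fit and return that count, so callers have to handle partial success. A hedged sketch:

#include <rte_ring.h>

static unsigned int
example_enqueue_burst(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int sent = rte_ring_enqueue_burst(r, objs, n, NULL);

	if (sent < n) {
		/* objs[sent..n-1] were not enqueued; drop or retry them here. */
	}
	return sent;
}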
880 static __rte_always_inline unsigned
881 rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
882  unsigned int n, unsigned int *available)
883 {
884  return __rte_ring_do_dequeue(r, obj_table, n,
885  RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
886 }
887 
905 static __rte_always_inline unsigned
906 rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
907  unsigned int n, unsigned int *available)
908 {
909  return __rte_ring_do_dequeue(r, obj_table, n,
910  RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
911 }
912 
932 static __rte_always_inline unsigned
933 rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
934  unsigned int n, unsigned int *available)
935 {
936  return __rte_ring_do_dequeue(r, obj_table, n,
937  RTE_RING_QUEUE_VARIABLE,
938  r->cons.single, available);
939 }
940 
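Likewise, the dequeue bursts return however many objects were actually available, which makes a simple drain loop possible. BURST_SIZE below is an arbitrary illustrative batch size, not defined by this header:

#include <rte_ring.h>

#define BURST_SIZE 32	/* illustrative batch size */

static void
example_drain(struct rte_ring *r)
{
	void *objs[BURST_SIZE];
	unsigned int n;

	while ((n = rte_ring_dequeue_burst(r, objs, BURST_SIZE, NULL)) > 0) {
		/* process objs[0..n-1] here */
	}
}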
941 #ifdef __cplusplus
942 }
943 #endif
944 
945 #endif /* _RTE_RING_H_ */