DPDK 19.11.14
rte_ring.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2010-2017 Intel Corporation
4  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
5  * All rights reserved.
6  * Derived from FreeBSD's bufring.h
7  * Used as BSD-3 Licensed with permission from Kip Macy.
8  */
9 
10 #ifndef _RTE_RING_H_
11 #define _RTE_RING_H_
12 
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38 
39 #include <stdio.h>
40 #include <stdint.h>
41 #include <sys/queue.h>
42 #include <errno.h>
43 #include <rte_common.h>
44 #include <rte_config.h>
45 #include <rte_memory.h>
46 #include <rte_lcore.h>
47 #include <rte_atomic.h>
48 #include <rte_branch_prediction.h>
49 #include <rte_memzone.h>
50 #include <rte_pause.h>
51 
52 #define RTE_TAILQ_RING_NAME "RTE_RING"
53 
54 enum rte_ring_queue_behavior {
55  RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
56  RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
57 };
58 
59 #define RTE_RING_MZ_PREFIX "RG_"
60 
61 #define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
62  sizeof(RTE_RING_MZ_PREFIX) + 1)
63 
64 /* structure to hold a pair of head/tail values and other metadata */
65 struct rte_ring_headtail {
66  volatile uint32_t head;
67  volatile uint32_t tail;
68  uint32_t single;
69 };
70 
81 struct rte_ring {
82  /*
83  * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
84  * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
85  * next time the ABI changes
86  */
87  char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned;
88  int flags;
89  const struct rte_memzone *memzone;
91  uint32_t size;
92  uint32_t mask;
93  uint32_t capacity;
95  char pad0 __rte_cache_aligned;
98  struct rte_ring_headtail prod __rte_cache_aligned;
99  char pad1 __rte_cache_aligned;
102  struct rte_ring_headtail cons __rte_cache_aligned;
103  char pad2 __rte_cache_aligned;
104 };
105 
106 #define RING_F_SP_ENQ 0x0001
107 #define RING_F_SC_DEQ 0x0002
116 #define RING_F_EXACT_SZ 0x0004
117 #define RTE_RING_SZ_MASK (0x7fffffffU)
119 /* @internal defines for passing to the enqueue/dequeue worker functions */
120 #define __IS_SP 1
121 #define __IS_MP 0
122 #define __IS_SC 1
123 #define __IS_MC 0
124 
139 ssize_t rte_ring_get_memsize(unsigned count);
140 
174 int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
175  unsigned flags);
176 
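/*
 * Editorial sketch (not part of the upstream header, written as it would
 * appear in application code that includes rte_ring.h): sizing a ring with
 * rte_ring_get_memsize() and initializing it in caller-provided memory with
 * rte_ring_init(), as an alternative to rte_ring_create().  rte_zmalloc()
 * and rte_free() are assumed to come from rte_malloc.h, which this header
 * does not include.
 */
/* #include <rte_malloc.h> is required for rte_zmalloc()/rte_free() */
static inline struct rte_ring *
example_ring_init_in_place(unsigned int count)
{
	ssize_t sz = rte_ring_get_memsize(count);
	struct rte_ring *r;

	if (sz < 0)
		return NULL;	/* count is not a valid (power-of-two) size */
	r = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (r == NULL)
		return NULL;
	if (rte_ring_init(r, "inplace_ring", count,
			RING_F_SP_ENQ | RING_F_SC_DEQ) != 0) {
		rte_free(r);
		return NULL;
	}
	return r;
}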
215 struct rte_ring *rte_ring_create(const char *name, unsigned count,
216  int socket_id, unsigned flags);
223 void rte_ring_free(struct rte_ring *r);
224 
233 void rte_ring_dump(FILE *f, const struct rte_ring *r);
234 
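/*
 * Editorial sketch (not part of the upstream header): creating a
 * single-producer/single-consumer ring at init time and falling back to a
 * lookup by name if it already exists.  The name and count are example
 * values; with these flags the usable capacity is count - 1 (1023 here)
 * unless RING_F_EXACT_SZ is also given.
 */
static inline struct rte_ring *
example_ring_create(void)
{
	struct rte_ring *r = rte_ring_create("example_ring", 1024,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);

	if (r == NULL)
		r = rte_ring_lookup("example_ring");
	return r;
}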
235 /* the actual enqueue of pointers on the ring.
236  * Placed here since identical code is needed in both
237  * the single- and multi-producer enqueue functions. */
238 #define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
239  unsigned int i; \
240  const uint32_t size = (r)->size; \
241  uint32_t idx = prod_head & (r)->mask; \
242  obj_type *ring = (obj_type *)ring_start; \
243  if (likely(idx + n < size)) { \
244  for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
245  ring[idx] = obj_table[i]; \
246  ring[idx+1] = obj_table[i+1]; \
247  ring[idx+2] = obj_table[i+2]; \
248  ring[idx+3] = obj_table[i+3]; \
249  } \
250  switch (n & 0x3) { \
251  case 3: \
252  ring[idx++] = obj_table[i++]; /* fallthrough */ \
253  case 2: \
254  ring[idx++] = obj_table[i++]; /* fallthrough */ \
255  case 1: \
256  ring[idx++] = obj_table[i++]; \
257  } \
258  } else { \
259  for (i = 0; idx < size; i++, idx++)\
260  ring[idx] = obj_table[i]; \
261  for (idx = 0; i < n; i++, idx++) \
262  ring[idx] = obj_table[i]; \
263  } \
264 } while (0)
265 
266 /* the actual copy of pointers from the ring to obj_table.
267  * Placed here since identical code is needed in both
268  * the single- and multi-consumer dequeue functions. */
269 #define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
270  unsigned int i; \
271  uint32_t idx = cons_head & (r)->mask; \
272  const uint32_t size = (r)->size; \
273  obj_type *ring = (obj_type *)ring_start; \
274  if (likely(idx + n < size)) { \
275  for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
276  obj_table[i] = ring[idx]; \
277  obj_table[i+1] = ring[idx+1]; \
278  obj_table[i+2] = ring[idx+2]; \
279  obj_table[i+3] = ring[idx+3]; \
280  } \
281  switch (n & 0x3) { \
282  case 3: \
283  obj_table[i++] = ring[idx++]; /* fallthrough */ \
284  case 2: \
285  obj_table[i++] = ring[idx++]; /* fallthrough */ \
286  case 1: \
287  obj_table[i++] = ring[idx++]; \
288  } \
289  } else { \
290  for (i = 0; idx < size; i++, idx++) \
291  obj_table[i] = ring[idx]; \
292  for (idx = 0; i < n; i++, idx++) \
293  obj_table[i] = ring[idx]; \
294  } \
295 } while (0)
296 
297 /* Between two loads, the CPU may reorder memory accesses on weakly
298  * ordered architectures (PowerPC/Arm).
299  * Users have two choices:
300  * 1. use an rmb() memory barrier
301  * 2. use one-direction load_acquire/store_release barriers, enabled by
302  * CONFIG_RTE_USE_C11_MEM_MODEL=y
303  * Which performs better depends on performance test results.
304  * By default, the common functions live in rte_ring_generic.h.
305  */
306 #ifdef RTE_USE_C11_MEM_MODEL
307 #include "rte_ring_c11_mem.h"
308 #else
309 #include "rte_ring_generic.h"
310 #endif
311 
332 static __rte_always_inline unsigned int
333 __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
334  unsigned int n, enum rte_ring_queue_behavior behavior,
335  unsigned int is_sp, unsigned int *free_space)
336 {
337  uint32_t prod_head, prod_next;
338  uint32_t free_entries;
339 
340  n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
341  &prod_head, &prod_next, &free_entries);
342  if (n == 0)
343  goto end;
344 
345  ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
346 
347  update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
348 end:
349  if (free_space != NULL)
350  *free_space = free_entries - n;
351  return n;
352 }
353 
374 static __rte_always_inline unsigned int
375 __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
376  unsigned int n, enum rte_ring_queue_behavior behavior,
377  unsigned int is_sc, unsigned int *available)
378 {
379  uint32_t cons_head, cons_next;
380  uint32_t entries;
381 
382  n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
383  &cons_head, &cons_next, &entries);
384  if (n == 0)
385  goto end;
386 
387  DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
388 
389  update_tail(&r->cons, cons_head, cons_next, is_sc, 0);
390 
391 end:
392  if (available != NULL)
393  *available = entries - n;
394  return n;
395 }
396 
415 static __rte_always_inline unsigned int
416 rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
417  unsigned int n, unsigned int *free_space)
418 {
419  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
420  __IS_MP, free_space);
421 }
422 
438 static __rte_always_inline unsigned int
439 rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
440  unsigned int n, unsigned int *free_space)
441 {
442  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
443  __IS_SP, free_space);
444 }
445 
465 static __rte_always_inline unsigned int
466 rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
467  unsigned int n, unsigned int *free_space)
468 {
469  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
470  r->prod.single, free_space);
471 }
472 
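/*
 * Editorial sketch: with RTE_RING_QUEUE_FIXED behavior a bulk enqueue is
 * all-or-nothing -- either all n pointers are enqueued (return value n) or
 * none are (return value 0).  example_bulk_enqueue() is a hypothetical
 * wrapper illustrating this.
 */
static inline int
example_bulk_enqueue(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int free_space;

	if (rte_ring_enqueue_bulk(r, objs, n, &free_space) == 0) {
		/* nothing was enqueued; free_space reports the remaining room */
		return -ENOBUFS;
	}
	return 0;
}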
487 static __rte_always_inline int
488 rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
489 {
490  return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
491 }
492 
504 static __rte_always_inline int
505 rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
506 {
507  return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
508 }
509 
525 static __rte_always_inline int
526 rte_ring_enqueue(struct rte_ring *r, void *obj)
527 {
528  return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
529 }
530 
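/*
 * Editorial sketch: enqueuing a single object and handling a full ring.
 * The policy applied in the -ENOBUFS branch is application-specific.
 */
static inline int
example_single_enqueue(struct rte_ring *r, void *obj)
{
	int ret = rte_ring_enqueue(r, obj);

	if (ret == -ENOBUFS) {
		/* ring is full: drop, retry later, or apply backpressure */
	}
	return ret;
}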
549 static __rte_always_inline unsigned int
550 rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
551  unsigned int n, unsigned int *available)
552 {
553  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
554  __IS_MC, available);
555 }
556 
573 static __rte_always_inline unsigned int
574 rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
575  unsigned int n, unsigned int *available)
576 {
577  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
578  __IS_SC, available);
579 }
580 
600 static __rte_always_inline unsigned int
601 rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
602  unsigned int *available)
603 {
604  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
605  r->cons.single, available);
606 }
607 
623 static __rte_always_inline int
624 rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
625 {
626  return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
627 }
628 
641 static __rte_always_inline int
642 rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
643 {
644  return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
645 }
646 
663 static __rte_always_inline int
664 rte_ring_dequeue(struct rte_ring *r, void **obj_p)
665 {
666  return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
667 }
668 
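/*
 * Editorial sketch: draining a ring one object at a time; rte_ring_dequeue()
 * returns 0 on success and -ENOENT once the ring is empty.
 */
static inline unsigned int
example_drain(struct rte_ring *r)
{
	void *obj;
	unsigned int drained = 0;

	while (rte_ring_dequeue(r, &obj) == 0) {
		/* process obj here */
		drained++;
	}
	return drained;
}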
682 __rte_experimental
683 void
684 rte_ring_reset(struct rte_ring *r);
685 
694 static inline unsigned
695 rte_ring_count(const struct rte_ring *r)
696 {
697  uint32_t prod_tail = r->prod.tail;
698  uint32_t cons_tail = r->cons.tail;
699  uint32_t count = (prod_tail - cons_tail) & r->mask;
700  return (count > r->capacity) ? r->capacity : count;
701 }
702 
711 static inline unsigned
712 rte_ring_free_count(const struct rte_ring *r)
713 {
714  return r->capacity - rte_ring_count(r);
715 }
716 
726 static inline int
727 rte_ring_full(const struct rte_ring *r)
728 {
729  return rte_ring_free_count(r) == 0;
730 }
731 
741 static inline int
742 rte_ring_empty(const struct rte_ring *r)
743 {
744  return rte_ring_count(r) == 0;
745 }
746 
757 static inline unsigned int
758 rte_ring_get_size(const struct rte_ring *r)
759 {
760  return r->size;
761 }
762 
771 static inline unsigned int
772 rte_ring_get_capacity(const struct rte_ring *r)
773 {
774  return r->capacity;
775 }
776 
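/*
 * Editorial sketch: the state query helpers above relate as
 * rte_ring_count() + rte_ring_free_count() == rte_ring_get_capacity(), and
 * capacity is at most size - 1 (exactly the requested count when
 * RING_F_EXACT_SZ was used at creation time).
 */
static inline void
example_report_usage(FILE *f, const struct rte_ring *r)
{
	fprintf(f, "used=%u free=%u capacity=%u size=%u\n",
		rte_ring_count(r), rte_ring_free_count(r),
		rte_ring_get_capacity(r), rte_ring_get_size(r));
}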
783 void rte_ring_list_dump(FILE *f);
784 
795 struct rte_ring *rte_ring_lookup(const char *name);
796 
815 static __rte_always_inline unsigned
816 rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
817  unsigned int n, unsigned int *free_space)
818 {
819  return __rte_ring_do_enqueue(r, obj_table, n,
820  RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
821 }
822 
838 static __rte_always_inline unsigned
839 rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
840  unsigned int n, unsigned int *free_space)
841 {
842  return __rte_ring_do_enqueue(r, obj_table, n,
843  RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
844 }
845 
865 static __rte_always_inline unsigned
866 rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
867  unsigned int n, unsigned int *free_space)
868 {
869  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
870  r->prod.single, free_space);
871 }
872 
893 static __rte_always_inline unsigned
894 rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
895  unsigned int n, unsigned int *available)
896 {
897  return __rte_ring_do_dequeue(r, obj_table, n,
898  RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
899 }
900 
918 static __rte_always_inline unsigned
919 rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
920  unsigned int n, unsigned int *available)
921 {
922  return __rte_ring_do_dequeue(r, obj_table, n,
923  RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
924 }
925 
945 static __rte_always_inline unsigned
946 rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
947  unsigned int n, unsigned int *available)
948 {
949  return __rte_ring_do_dequeue(r, obj_table, n,
950  RTE_RING_QUEUE_VARIABLE,
951  r->cons.single, available);
952 }
953 
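/*
 * Editorial sketch: a typical burst worker loop moving up to 32 pointers per
 * call from an "rx" ring to a "tx" ring.  Variable behavior means partial
 * transfers are possible, so the return values must be checked.
 */
static inline void
example_burst_forward(struct rte_ring *rx, struct rte_ring *tx)
{
	void *burst[32];
	unsigned int n, sent;

	n = rte_ring_dequeue_burst(rx, burst, 32, NULL);
	if (n == 0)
		return;
	sent = rte_ring_enqueue_burst(tx, burst, n, NULL);
	if (sent < n) {
		/* tx ring full: the remaining n - sent pointers leak unless
		 * the application frees or re-queues them here
		 */
	}
}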
954 #ifdef __cplusplus
955 }
956 #endif
957 
958 #endif /* _RTE_RING_H_ */