DPDK 18.05.1
rte_ring.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_H_
#define _RTE_RING_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdint.h>
#include <sys/queue.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#include <rte_pause.h>

#define RTE_TAILQ_RING_NAME "RTE_RING"

enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
	RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};

#define RTE_RING_MZ_PREFIX "RG_"

#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
			   sizeof(RTE_RING_MZ_PREFIX) + 1)

struct rte_memzone; /* forward declaration, so as not to require memzone.h */

/* Structure to hold a pair of head/tail values and other metadata. */
struct rte_ring_headtail {
	volatile uint32_t head;  /**< Producer/consumer head. */
	volatile uint32_t tail;  /**< Producer/consumer tail. */
	uint32_t single;         /**< True if single producer/consumer. */
};

struct rte_ring {
	/*
	 * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
	 * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
	 * next time the ABI changes
	 */
	char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned; /**< Name of the ring. */
	int flags;               /**< Flags supplied at creation. */
	const struct rte_memzone *memzone; /**< Memzone, if any, containing the ring. */
	uint32_t size;           /**< Size of ring. */
	uint32_t mask;           /**< Mask (size - 1) of ring. */
	uint32_t capacity;       /**< Usable size of ring. */

	char pad0 __rte_cache_aligned; /**< Empty cache line. */

	/** Ring producer status. */
	struct rte_ring_headtail prod __rte_cache_aligned;

	/** Ring consumer status. */
	struct rte_ring_headtail cons __rte_cache_aligned;
};

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
#define RING_F_EXACT_SZ 0x0004 /**< Ring holds exactly the requested number of entries. */
#define RTE_RING_SZ_MASK (0x7fffffffU) /**< Ring size mask. */

/* @internal defines for passing to the enqueue/dequeue worker functions */
#define __IS_SP 1
#define __IS_MP 0
#define __IS_SC 1
#define __IS_MC 0

ssize_t rte_ring_get_memsize(unsigned count);

int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
	unsigned flags);

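/*
 * Illustrative sketch, not part of the original rte_ring.h: initialising a
 * ring in caller-allocated memory with rte_ring_get_memsize() and
 * rte_ring_init(). The helper name example_init_ring() is hypothetical and
 * the sketch assumes <rte_malloc.h> is available to the caller.
 */
static inline struct rte_ring *
example_init_ring(const char *name, unsigned int count)
{
	ssize_t sz = rte_ring_get_memsize(count); /* count must be a power of 2 */
	struct rte_ring *r;

	if (sz < 0)
		return NULL; /* e.g. -EINVAL for a bad count */

	r = rte_malloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (r == NULL)
		return NULL;

	if (rte_ring_init(r, name, count, RING_F_SP_ENQ | RING_F_SC_DEQ) != 0) {
		rte_free(r);
		return NULL;
	}
	return r;
}
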
struct rte_ring *rte_ring_create(const char *name, unsigned count,
				 int socket_id, unsigned flags);

void rte_ring_free(struct rte_ring *r);

void rte_ring_dump(FILE *f, const struct rte_ring *r);

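/*
 * Illustrative sketch, not part of the original rte_ring.h: the more common
 * path of letting rte_ring_create() allocate a memzone-backed ring, dumping
 * its state and releasing it. The ring name "example_ring" is hypothetical.
 */
static inline void
example_create_dump_free(void)
{
	struct rte_ring *r = rte_ring_create("example_ring", 1024,
					     rte_socket_id(),
					     RING_F_SP_ENQ | RING_F_SC_DEQ);

	if (r == NULL)
		return; /* rte_errno describes the failure */

	rte_ring_dump(stdout, r); /* print flags, size and head/tail indexes */
	rte_ring_free(r);
}
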
/* The actual enqueue of pointers on the ring.
 * Placed here since identical code is needed in both
 * single- and multi-producer enqueue functions.
 */
#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	const uint32_t size = (r)->size; \
	uint32_t idx = prod_head & (r)->mask; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
			ring[idx] = obj_table[i]; \
			ring[idx + 1] = obj_table[i + 1]; \
			ring[idx + 2] = obj_table[i + 2]; \
			ring[idx + 3] = obj_table[i + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 2: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 1: \
			ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			ring[idx] = obj_table[i]; \
	} \
} while (0)

/* The actual copy of pointers from the ring to obj_table.
 * Placed here since identical code is needed in both
 * single- and multi-consumer dequeue functions.
 */
#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	uint32_t idx = cons_head & (r)->mask; \
	const uint32_t size = (r)->size; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
			obj_table[i] = ring[idx]; \
			obj_table[i + 1] = ring[idx + 1]; \
			obj_table[i + 2] = ring[idx + 2]; \
			obj_table[i + 3] = ring[idx + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 2: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 1: \
			obj_table[i++] = ring[idx++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = ring[idx]; \
	} \
} while (0)

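/*
 * Illustrative sketch, not part of the original rte_ring.h: the two macros
 * above copy four entries per loop iteration and, when the requested slice
 * crosses the end of the ring array, split the copy into two contiguous
 * runs. The same indexing without the unrolling, as a hypothetical helper:
 */
static inline void
example_copy_in(void **ring, uint32_t size, uint32_t mask,
		uint32_t prod_head, void * const *obj_table, unsigned int n)
{
	uint32_t idx = prod_head & mask; /* slot of the first entry to write */
	unsigned int i;

	if (idx + n < size) {
		/* no wrap: one contiguous run of n slots */
		for (i = 0; i < n; i++)
			ring[idx + i] = obj_table[i];
	} else {
		/* wrap: fill up to the end of the array, then restart at slot 0 */
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj_table[i];
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj_table[i];
	}
}
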
/* Between two loads the CPU may reorder on weakly ordered models
 * (PowerPC/Arm).
 * There are two choices for the user:
 * 1. use an rmb() memory barrier, or
 * 2. use one-direction load_acquire/store_release barriers, enabled by
 *    CONFIG_RTE_RING_USE_C11_MEM_MODEL=y.
 * Which performs better depends on performance test results.
 * By default, the common functions come from rte_ring_generic.h.
 */
#ifdef RTE_RING_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif

static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
		      unsigned int n, enum rte_ring_queue_behavior behavior,
		      unsigned int is_sp, unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);

	update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}

static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
		      unsigned int n, enum rte_ring_queue_behavior behavior,
		      unsigned int is_sc, unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);

	update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}

static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MP, free_space);
}

static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
			 unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SP, free_space);
}

static __rte_always_inline unsigned int
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		      unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->prod.single, free_space);
}

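/*
 * Illustrative sketch, not part of the original rte_ring.h: bulk enqueue is
 * all-or-nothing. With RTE_RING_QUEUE_FIXED behaviour the call either
 * enqueues all n objects and returns n, or enqueues none and returns 0. The
 * helper name is hypothetical.
 */
static inline int
example_enqueue_batch(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int free_space;

	if (rte_ring_enqueue_bulk(r, objs, n, &free_space) == 0)
		return -ENOBUFS; /* not enough room for all n objects */

	/* free_space now holds the number of slots left after the enqueue */
	return 0;
}
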
static __rte_always_inline int
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

static __rte_always_inline int
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

static __rte_always_inline int
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

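/*
 * Illustrative sketch, not part of the original rte_ring.h: the
 * single-object wrappers return 0 on success and -ENOBUFS when the ring is
 * full. The helper name is hypothetical.
 */
static inline int
example_produce_one(struct rte_ring *r, void *obj)
{
	int rc = rte_ring_enqueue(r, obj);

	if (rc == -ENOBUFS) {
		/* ring full: drop the object, retry later or apply backpressure */
	}
	return rc;
}
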
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
			 unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MC, available);
}

static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
			 unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SC, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
		      unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->cons.single, available);
}

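/*
 * Illustrative sketch, not part of the original rte_ring.h: bulk dequeue
 * mirrors bulk enqueue, returning n only if n objects could be taken and 0
 * (leaving the ring untouched) otherwise. The helper name is hypothetical.
 */
static inline int
example_dequeue_batch(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int remaining;

	if (rte_ring_dequeue_bulk(r, objs, n, &remaining) == 0)
		return -ENOENT; /* fewer than n objects were available */

	/* remaining holds the number of objects still left in the ring */
	return 0;
}
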
static __rte_always_inline int
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

static __rte_always_inline int
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

static __rte_always_inline int
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

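/*
 * Illustrative sketch, not part of the original rte_ring.h: consuming a
 * single object; -ENOENT simply means the ring was empty. The callback
 * example_process() is hypothetical.
 */
static inline void
example_consume_one(struct rte_ring *r, void (*example_process)(void *))
{
	void *obj;

	if (rte_ring_dequeue(r, &obj) == 0)
		example_process(obj);
	/* else: -ENOENT, nothing queued right now */
}
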
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	uint32_t count = (prod_tail - cons_tail) & r->mask;
	return (count > r->capacity) ? r->capacity : count;
}

static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	return r->capacity - rte_ring_count(r);
}

static inline int
rte_ring_full(const struct rte_ring *r)
{
	return rte_ring_free_count(r) == 0;
}

static inline int
rte_ring_empty(const struct rte_ring *r)
{
	return rte_ring_count(r) == 0;
}

static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
	return r->size;
}

static inline unsigned int
rte_ring_get_capacity(const struct rte_ring *r)
{
	return r->capacity;
}

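/*
 * Illustrative sketch, not part of the original rte_ring.h: reading the
 * ring's occupancy counters. On a ring used concurrently these values are
 * only snapshots. The helper name is hypothetical.
 */
static inline void
example_report(const struct rte_ring *r, FILE *f)
{
	fprintf(f, "size=%u capacity=%u used=%u free=%u full=%d empty=%d\n",
		rte_ring_get_size(r), rte_ring_get_capacity(r),
		rte_ring_count(r), rte_ring_free_count(r),
		rte_ring_full(r), rte_ring_empty(r));
}
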
void rte_ring_list_dump(FILE *f);

struct rte_ring *rte_ring_lookup(const char *name);

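/*
 * Illustrative sketch, not part of the original rte_ring.h: attaching to a
 * ring created elsewhere (for example in another process sharing the same
 * memory) by name. The name "example_ring" is hypothetical.
 */
static inline struct rte_ring *
example_attach(void)
{
	struct rte_ring *r = rte_ring_lookup("example_ring");

	if (r == NULL) {
		/* no ring with that name is registered */
	}
	return r;
}
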
static __rte_always_inline unsigned
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}

static __rte_always_inline unsigned
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
			  unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}

static __rte_always_inline unsigned
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		       unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
			r->prod.single, free_space);
}

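/*
 * Illustrative sketch, not part of the original rte_ring.h: burst enqueue
 * stores as many of the n objects as currently fit and returns that count,
 * so the caller handles the remainder. The helper name is hypothetical.
 */
static inline unsigned int
example_enqueue_best_effort(struct rte_ring *r, void **objs, unsigned int n)
{
	unsigned int sent = rte_ring_enqueue_burst(r, objs, n, NULL);

	if (sent < n) {
		/* objs[sent] .. objs[n - 1] were not enqueued: drop or retry them */
	}
	return sent;
}
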
static __rte_always_inline unsigned
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
			  unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}

static __rte_always_inline unsigned
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
			  unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}

static __rte_always_inline unsigned
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
		       unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE,
			r->cons.single, available);
}

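/*
 * Illustrative sketch, not part of the original rte_ring.h: draining a ring
 * in bursts. The batch size and the per-object callback example_handle()
 * are hypothetical.
 */
#define EXAMPLE_BURST_SZ 32

static inline void
example_drain(struct rte_ring *r, void (*example_handle)(void *))
{
	void *objs[EXAMPLE_BURST_SZ];
	unsigned int i, nb;

	do {
		nb = rte_ring_dequeue_burst(r, objs, EXAMPLE_BURST_SZ, NULL);
		for (i = 0; i < nb; i++)
			example_handle(objs[i]);
	} while (nb == EXAMPLE_BURST_SZ);
}
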
#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_H_ */