DPDK 17.05.2: rte_ring.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33 
34 /*
35  * Derived from FreeBSD's bufring.h
36  *
37  **************************************************************************
38  *
39  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions are met:
44  *
45  * 1. Redistributions of source code must retain the above copyright notice,
46  * this list of conditions and the following disclaimer.
47  *
48  * 2. The name of Kip Macy nor the names of other
49  * contributors may be used to endorse or promote products derived from
50  * this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
53  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
56  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62  * POSSIBILITY OF SUCH DAMAGE.
63  *
64  ***************************************************************************/
65 
#ifndef _RTE_RING_H_
#define _RTE_RING_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdint.h>
#include <sys/queue.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>

#define RTE_TAILQ_RING_NAME "RTE_RING"

enum rte_ring_queue_behavior {
	RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
	RTE_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};

#define RTE_RING_MZ_PREFIX "RG_"
/* The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
			   sizeof(RTE_RING_MZ_PREFIX) + 1)

struct rte_memzone; /* forward declaration, so as not to require memzone.h */

#if RTE_CACHE_LINE_SIZE < 128
#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
#else
#define PROD_ALIGN RTE_CACHE_LINE_SIZE
#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif

/* structure to hold a pair of head/tail values and other metadata */
struct rte_ring_headtail {
	volatile uint32_t head;	/* Producer/consumer head. */
	volatile uint32_t tail;	/* Producer/consumer tail. */
	uint32_t single;	/* True if single producer/consumer. */
};
struct rte_ring {
	/*
	 * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
	 * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
	 * next time the ABI changes
	 */
	char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned; /* Name of the ring. */
	int flags;                         /* Flags supplied at creation. */
	const struct rte_memzone *memzone; /* Memzone, if any, containing the ring. */
	uint32_t size;                     /* Size of ring. */
	uint32_t mask;                     /* Mask (size - 1) of ring. */

	/* Ring producer status. */
	struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);

	/* Ring consumer status. */
	struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
};

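/*
 * Illustrative sketch (not part of the original header): because "size"
 * is a power of two and "mask" is size - 1, a free-running 32-bit
 * head/tail position maps to a table slot with a single AND instead of
 * a modulo. The size below is a hypothetical example value.
 */
static inline uint32_t
example_ring_slot(uint32_t pos)
{
	const uint32_t size = 1024;      /* hypothetical power-of-two size */
	const uint32_t mask = size - 1;  /* 0x3ff */

	return pos & mask;               /* equivalent to pos % size */
}
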
#define RING_F_SP_ENQ 0x0001 /* The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /* The default dequeue is "single-consumer". */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /* Ring size mask */

/* @internal defines for passing to the enqueue dequeue worker functions */
#define __IS_SP 1
#define __IS_MP 0
#define __IS_SC 1
#define __IS_MC 0
/*
 * Calculate the memory size needed for a ring with count elements; count
 * must be a power of two. Returns the size on success, -EINVAL otherwise.
 */
ssize_t rte_ring_get_memsize(unsigned count);

/* Initialize a caller-allocated ring. Returns 0 on success, negative on error. */
int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
	unsigned flags);

/*
 * Create a named ring in a new memzone on the given NUMA socket. count must
 * be a power of two; flags may combine RING_F_SP_ENQ and RING_F_SC_DEQ.
 * Returns NULL on error.
 */
struct rte_ring *rte_ring_create(const char *name, unsigned count,
	int socket_id, unsigned flags);

/* De-allocate all memory used by the ring. */
void rte_ring_free(struct rte_ring *r);

/* Dump the status of the ring to a file. */
void rte_ring_dump(FILE *f, const struct rte_ring *r);

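/*
 * Usage sketch (illustrative, not from the original header): create a
 * ring at initialization time and release it with rte_ring_free() at
 * teardown. The name "example_ring" and the size 1024 are hypothetical.
 */
static inline struct rte_ring *
example_ring_setup(void)
{
	/* 1024 slots, single producer and single consumer */
	struct rte_ring *r = rte_ring_create("example_ring", 1024,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);

	return r;	/* NULL on failure; rte_errno holds the reason */
}
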
/* the actual enqueue of pointers on the ring.
 * Placed here since identical code needed in both
 * single and multi producer enqueue functions */
#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	const uint32_t size = (r)->size; \
	uint32_t idx = prod_head & (r)->mask; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
			ring[idx] = obj_table[i]; \
			ring[idx + 1] = obj_table[i + 1]; \
			ring[idx + 2] = obj_table[i + 2]; \
			ring[idx + 3] = obj_table[i + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 2: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 1: \
			ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			ring[idx] = obj_table[i]; \
	} \
} while (0)

/* the actual copy of pointers on the ring to obj_table.
 * Placed here since identical code needed in both
 * single and multi consumer dequeue functions */
#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	uint32_t idx = cons_head & (r)->mask; \
	const uint32_t size = (r)->size; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
			obj_table[i] = ring[idx]; \
			obj_table[i + 1] = ring[idx + 1]; \
			obj_table[i + 2] = ring[idx + 2]; \
			obj_table[i + 3] = ring[idx + 3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 2: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 1: \
			obj_table[i++] = ring[idx++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = ring[idx]; \
	} \
} while (0)

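/*
 * Worked example (illustrative, values hypothetical): for a ring of
 * size 8 (mask 7), copying n = 3 objects from logical position 6 takes
 * the "else" branch of the macros above: slots 6 and 7 are filled
 * first, then the index wraps to slot 0 for the remaining object.
 */
static inline void
example_wrap_split(void)
{
	const uint32_t size = 8, mask = size - 1; /* hypothetical ring */
	uint32_t head = 6, n = 3;
	uint32_t idx = head & mask;               /* 6 */
	uint32_t before_wrap = size - idx;        /* 2 slots: 6 and 7 */
	uint32_t after_wrap = n - before_wrap;    /* 1 slot: 0 */

	(void)after_wrap;
}
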
static inline __attribute__((always_inline)) void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
		uint32_t single)
{
	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete
	 */
	if (!single)
		while (unlikely(ht->tail != old_val))
			rte_pause();

	ht->tail = new_val;
}

/*
 * @internal Reserve room for enqueue by moving the producer head; returns
 * the number of entries actually reserved (0 if the ring is full and
 * behavior is RTE_RING_QUEUE_FIXED).
 */
static inline __attribute__((always_inline)) unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t mask = r->mask;
	unsigned int max = n;
	int success;

	do {
		/* Reset n to the initial burst count */
		n = max;

		*old_head = r->prod.head;
		const uint32_t cons_tail = r->cons.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > cons_tail). So 'free_entries' is always between 0
		 * and size(ring)-1. */
		*free_entries = (mask + cons_tail - *old_head);

		/* check that we have enough room in ring */
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp)
			r->prod.head = *new_head, success = 1;
		else
			success = rte_atomic32_cmpset(&r->prod.head,
					*old_head, *new_head);
	} while (unlikely(success == 0));
	return n;
}

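/*
 * Worked example (illustrative, values hypothetical): with mask = 7,
 * cons_tail = 3 and old_head = 6, the ring holds 3 entries and
 * free_entries = 7 + 3 - 6 = 4, i.e. the capacity (size - 1) minus the
 * entries in use. Because the subtraction is modulo 2^32, the result
 * stays correct even after the 32-bit head/tail counters wrap around.
 */
static inline uint32_t
example_free_entries(uint32_t mask, uint32_t cons_tail, uint32_t prod_head)
{
	return mask + cons_tail - prod_head; /* always in [0, mask] */
}
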
/*
 * @internal Enqueue several objects on the ring: reserve space, copy the
 * object pointers, then make them visible by updating the producer tail.
 */
static inline __attribute__((always_inline)) unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		int is_sp, unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
	rte_smp_wmb();

	update_tail(&r->prod, prod_head, prod_next, is_sp);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}

/*
 * @internal Reserve entries for dequeue by moving the consumer head; returns
 * the number of entries actually reserved (0 if the ring is empty and
 * behavior is RTE_RING_QUEUE_FIXED).
 */
static inline __attribute__((always_inline)) unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	int success;

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = max;

		*old_head = r->cons.head;
		const uint32_t prod_tail = r->prod.tail;
		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1. */
		*entries = (prod_tail - *old_head);

		/* Set the actual entries for dequeue */
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc)
			r->cons.head = *new_head, success = 1;
		else
			success = rte_atomic32_cmpset(&r->cons.head, *old_head,
					*new_head);
	} while (unlikely(success == 0));
	return n;
}

/*
 * @internal Dequeue several objects from the ring: reserve entries, copy the
 * object pointers out, then release the slots by updating the consumer tail.
 */
static inline __attribute__((always_inline)) unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		int is_sc, unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
	rte_smp_rmb();

	update_tail(&r->cons, cons_head, cons_next, is_sc);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}

/*
 * Enqueue several objects on the ring (multi-producers safe). Returns the
 * number of objects enqueued, either 0 or n; if free_space is not NULL, it
 * receives the amount of space left in the ring after the operation.
 */
static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MP, free_space);
}

/* Enqueue several objects on the ring (NOT multi-producers safe). */
static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SP, free_space);
}

/*
 * Enqueue several objects on the ring, using the default (per-ring)
 * producer behavior chosen at creation time.
 */
static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->prod.single, free_space);
}

/*
 * Enqueue one object on the ring (multi-producers safe). Returns 0 on
 * success, -ENOBUFS if there is not enough room.
 */
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/* Enqueue one object on the ring (NOT multi-producers safe). */
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

/* Enqueue one object, using the ring's default producer behavior. */
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
	return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}

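/*
 * Usage sketch (illustrative): the single-object enqueue returns
 * -ENOBUFS when no room is left; the caller decides whether to retry
 * or drop. "obj" is a hypothetical pointer.
 */
static inline int
example_enqueue_one(struct rte_ring *r, void *obj)
{
	if (rte_ring_enqueue(r, obj) < 0)
		return -1;	/* ring full */
	return 0;
}
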
/*
 * Dequeue several objects from the ring (multi-consumers safe). Returns the
 * number of objects dequeued, either 0 or n; if available is not NULL, it
 * receives the number of entries remaining after the dequeue.
 */
static inline unsigned int __attribute__((always_inline))
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_MC, available);
}

/* Dequeue several objects from the ring (NOT multi-consumers safe). */
static inline unsigned int __attribute__((always_inline))
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			__IS_SC, available);
}

/*
 * Dequeue several objects from the ring, using the default (per-ring)
 * consumer behavior chosen at creation time.
 */
static inline unsigned int __attribute__((always_inline))
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
		unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
			r->cons.single, available);
}

/*
 * Dequeue one object from the ring (multi-consumers safe). Returns 0 on
 * success, -ENOENT if the ring is empty.
 */
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

/* Dequeue one object from the ring (NOT multi-consumers safe). */
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

/* Dequeue one object, using the ring's default consumer behavior. */
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
	return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}

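/*
 * Usage sketch (illustrative): the single-object dequeue returns
 * -ENOENT when the ring is empty.
 */
static inline void *
example_dequeue_one(struct rte_ring *r)
{
	void *obj;

	if (rte_ring_dequeue(r, &obj) < 0)
		return NULL;	/* ring empty */
	return obj;
}
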
/* Test if a ring is full; returns 1 if full, 0 otherwise. */
static inline int
rte_ring_full(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}

/* Test if a ring is empty; returns 1 if empty, 0 otherwise. */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return !!(cons_tail == prod_tail);
}

/* Return the number of entries in a ring. */
static inline unsigned
rte_ring_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (prod_tail - cons_tail) & r->mask;
}

/* Return the number of free entries in a ring. */
static inline unsigned
rte_ring_free_count(const struct rte_ring *r)
{
	uint32_t prod_tail = r->prod.tail;
	uint32_t cons_tail = r->cons.tail;
	return (cons_tail - prod_tail - 1) & r->mask;
}

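/*
 * Invariant sketch (illustrative): one slot is always kept empty to
 * distinguish a full ring from an empty one, so on a quiescent ring
 * (the two helpers read the tails independently)
 * count + free_count == size - 1.
 */
static inline int
example_capacity_invariant(const struct rte_ring *r)
{
	return rte_ring_count(r) + rte_ring_free_count(r) == r->size - 1;
}
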
/* Return the size of the ring (note: the usable capacity is size - 1). */
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
	return r->size;
}

/* Dump the status of all rings to a file. */
void rte_ring_list_dump(FILE *f);

/*
 * Search for a ring by its name. Returns the ring, or NULL with rte_errno
 * set to ENOENT if not found.
 */
struct rte_ring *rte_ring_lookup(const char *name);

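/*
 * Usage sketch (illustrative; "example_ring" is a hypothetical name):
 * another part of the application, or a secondary process, can attach
 * to an existing ring by name.
 */
static inline struct rte_ring *
example_ring_attach(void)
{
	struct rte_ring *r = rte_ring_lookup("example_ring");

	if (r == NULL)
		return NULL;	/* rte_errno is set to ENOENT */
	return r;
}
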
/*
 * Enqueue several objects on the ring (multi-producers safe). Unlike the
 * bulk functions, a burst enqueues as many objects as possible and returns
 * that count, which may be less than n.
 */
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}

/* Burst enqueue (NOT multi-producers safe). */
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}

/* Burst enqueue using the ring's default producer behavior. */
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
		unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
			r->prod.single, free_space);
}

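/*
 * Usage sketch (illustrative): a burst enqueue may accept only part of
 * the table, so the caller must deal with the remainder; here it simply
 * retries. "tx_ring" and "objs" are hypothetical.
 */
static inline void
example_burst_enqueue(struct rte_ring *tx_ring, void **objs, unsigned int n)
{
	unsigned int sent = rte_ring_enqueue_burst(tx_ring, objs, n, NULL);

	while (sent < n)	/* back-pressure: spin until all objects fit */
		sent += rte_ring_enqueue_burst(tx_ring, objs + sent,
				n - sent, NULL);
}
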
/*
 * Dequeue up to n objects from the ring (multi-consumers safe). Unlike the
 * bulk functions, a burst dequeues as many objects as are available and
 * returns that count, which may be less than n.
 */
static inline unsigned __attribute__((always_inline))
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}

/* Burst dequeue (NOT multi-consumers safe). */
static inline unsigned __attribute__((always_inline))
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}

/* Burst dequeue using the ring's default consumer behavior. */
static inline unsigned __attribute__((always_inline))
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue(r, obj_table, n,
			RTE_RING_QUEUE_VARIABLE,
			r->cons.single, available);
}

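/*
 * Usage sketch (illustrative): drain up to a fixed burst of objects per
 * call and process them; "process_obj" is a hypothetical callback.
 */
static inline void
example_burst_dequeue(struct rte_ring *rx_ring, void (*process_obj)(void *))
{
	void *objs[32];
	unsigned int i, nb;

	nb = rte_ring_dequeue_burst(rx_ring, objs, 32, NULL);
	for (i = 0; i < nb; i++)
		process_obj(objs[i]);
}
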
#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_H_ */