DPDK 21.02.0
rte_ring_rts_elem_pvt.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2020 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_RTS_ELEM_PVT_H_
#define _RTE_RING_RTS_ELEM_PVT_H_

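/**
 * @file rte_ring_rts_elem_pvt.h
 * It is not recommended to include this file directly,
 * include <rte_ring.h> instead.
 * Contains the internal helper functions for the Relaxed Tail Sync (RTS)
 * ring mode: the tail is moved forward only by the last thread among the
 * enqueues/dequeues currently in progress. Each head move bumps a 32-bit
 * counter (cnt) packed together with the 32-bit position (pos) into the
 * 64-bit 'raw' field of union __rte_ring_rts_poscnt (see rte_ring_core.h),
 * so both can be compared and swapped in one atomic operation.
 */

/**
 * @internal This function updates tail values.
 */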
static __rte_always_inline void
__rte_ring_rts_update_tail(struct rte_ring_rts_headtail *ht)
{
	union __rte_ring_rts_poscnt h, ot, nt;

	/*
	 * If there are other enqueues/dequeues in progress that
	 * might precede us, then don't update tail with a new value.
	 */

	ot.raw = __atomic_load_n(&ht->tail.raw, __ATOMIC_ACQUIRE);

	do {
		/* on 32-bit systems we have to do atomic read here */
		h.raw = __atomic_load_n(&ht->head.raw, __ATOMIC_RELAXED);

		nt.raw = ot.raw;
		if (++nt.val.cnt == h.val.cnt)
			nt.val.pos = h.val.pos;

	} while (__atomic_compare_exchange_n(&ht->tail.raw, &ot.raw, nt.raw,
			0, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE) == 0);
}

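/**
 * @internal Spin until the head/tail distance no longer exceeds
 * the pre-defined maximum (htd_max), i.e. until not too many
 * enqueues/dequeues are still in progress, re-reading the head
 * on each iteration.
 */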
static __rte_always_inline void
__rte_ring_rts_head_wait(const struct rte_ring_rts_headtail *ht,
	union __rte_ring_rts_poscnt *h)
{
	uint32_t max;

	max = ht->htd_max;

	while (h->val.pos - ht->tail.val.pos > max) {
		rte_pause();
		h->raw = __atomic_load_n(&ht->head.raw, __ATOMIC_ACQUIRE);
	}
}

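/**
 * @internal This function updates the producer head for enqueue.
 * It claims up to num slots by advancing prod.head.pos and incrementing
 * prod.head.cnt in a single 64-bit CAS, so concurrent producers always
 * reserve disjoint ranges of ring slots.
 */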
static __rte_always_inline uint32_t
__rte_ring_rts_move_prod_head(struct rte_ring *r, uint32_t num,
	enum rte_ring_queue_behavior behavior, uint32_t *old_head,
	uint32_t *free_entries)
{
	uint32_t n;
	union __rte_ring_rts_poscnt nh, oh;

	const uint32_t capacity = r->capacity;

	oh.raw = __atomic_load_n(&r->rts_prod.head.raw, __ATOMIC_ACQUIRE);

	do {
		/* Reset n to the initial burst count */
		n = num;

		/*
		 * wait for prod head/tail distance,
		 * make sure that we read prod head *before*
		 * reading cons tail.
		 */
		__rte_ring_rts_head_wait(&r->rts_prod, &oh);

		/*
		 * The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > cons_tail). So 'free_entries' is always between 0
		 * and capacity (which is < size).
		 */
		*free_entries = capacity + r->cons.tail - oh.val.pos;

		/* check that we have enough room in ring */
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;

		if (n == 0)
			break;

		nh.val.pos = oh.val.pos + n;
		nh.val.cnt = oh.val.cnt + 1;

		/*
		 * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
		 * - OOO reads of cons tail value
		 * - OOO copy of elems to the ring
		 */
	} while (__atomic_compare_exchange_n(&r->rts_prod.head.raw,
			&oh.raw, nh.raw,
			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);

	*old_head = oh.val.pos;
	return n;
}

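/**
 * @internal This function updates the consumer head for dequeue;
 * the mirror image of __rte_ring_rts_move_prod_head(), operating on
 * cons.head against prod.tail.
 */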
static __rte_always_inline unsigned int
__rte_ring_rts_move_cons_head(struct rte_ring *r, uint32_t num,
	enum rte_ring_queue_behavior behavior, uint32_t *old_head,
	uint32_t *entries)
{
	uint32_t n;
	union __rte_ring_rts_poscnt nh, oh;

	oh.raw = __atomic_load_n(&r->rts_cons.head.raw, __ATOMIC_ACQUIRE);

	/* move cons.head atomically */
	do {
		/* Restore n as it may change every loop */
		n = num;

		/*
		 * wait for cons head/tail distance,
		 * make sure that we read cons head *before*
		 * reading prod tail.
		 */
		__rte_ring_rts_head_wait(&r->rts_cons, &oh);

		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1.
		 */
		*entries = r->prod.tail - oh.val.pos;

		/* Set the actual entries for dequeue */
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

		if (unlikely(n == 0))
			break;

		nh.val.pos = oh.val.pos + n;
		nh.val.cnt = oh.val.cnt + 1;

		/*
		 * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
		 * - OOO reads of prod tail value
		 * - OOO copy of elems from the ring
		 */
	} while (__atomic_compare_exchange_n(&r->rts_cons.head.raw,
			&oh.raw, nh.raw,
			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE) == 0);

	*old_head = oh.val.pos;
	return n;
}

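/**
 * @internal Enqueue several objects on the RTS ring.
 * Reserves room by moving prod.head, copies n objects of esize bytes
 * each, then lets the last producer still in flight publish the new
 * prod.tail. Returns the actual number of objects enqueued (0 or n for
 * RTE_RING_QUEUE_FIXED behavior); *free_space, if non-NULL, receives
 * the remaining ring capacity.
 */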
static __rte_always_inline unsigned int
__rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
	uint32_t *free_space)
{
	uint32_t free, head;

	n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free);

	if (n != 0) {
		__rte_ring_enqueue_elems(r, head, obj_table, esize, n);
		__rte_ring_rts_update_tail(&r->rts_prod);
	}

	if (free_space != NULL)
		*free_space = free - n;
	return n;
}

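/**
 * @internal Dequeue several objects from the RTS ring.
 * Same pattern as the enqueue path, driven from the consumer side:
 * move cons.head, copy the objects out, update cons.tail.
 */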
static __rte_always_inline unsigned int
__rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
	uint32_t *available)
{
	uint32_t entries, head;

	n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &entries);

	if (n != 0) {
		__rte_ring_dequeue_elems(r, head, obj_table, esize, n);
		__rte_ring_rts_update_tail(&r->rts_cons);
	}

	if (available != NULL)
		*available = entries - n;
	return n;
}

#endif /* _RTE_RING_RTS_ELEM_PVT_H_ */
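
These helpers are not meant to be called directly: they back the public rte_ring API whenever a ring is created with the RTS synchronization flags. Below is a minimal sketch of how the RTS paths get exercised; the demo function is hypothetical, it assumes the EAL has already been initialized with rte_eal_init(), and the ring name, size, and burst count are arbitrary.

#include <rte_ring.h>

int
rts_ring_demo(void)
{
	void *objs[32] = { NULL };
	unsigned int sent, got;

	/* Both producers and consumers use Relaxed Tail Sync. */
	struct rte_ring *r = rte_ring_create("rts_demo", 1024, SOCKET_ID_ANY,
			RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
	if (r == NULL)
		return -1;

	/* Reaches __rte_ring_do_rts_enqueue_elem() via the public API. */
	sent = rte_ring_enqueue_burst(r, objs, RTE_DIM(objs), NULL);

	/* Reaches __rte_ring_do_rts_dequeue_elem(). */
	got = rte_ring_dequeue_burst(r, objs, RTE_DIM(objs), NULL);

	rte_ring_free(r);
	return sent == got ? 0 : -1;
}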