DPDK 22.11.11-rc1
rte_ring_c11_pvt.h
1/* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright (c) 2017,2018 HXT-semitech Corporation.
4 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
5 * Copyright (c) 2021 Arm Limited
6 * All rights reserved.
7 * Derived from FreeBSD's bufring.h
8 * Used as BSD-3 Licensed with permission from Kip Macy.
9 */
10
11#ifndef _RTE_RING_C11_PVT_H_
12#define _RTE_RING_C11_PVT_H_
13
14static __rte_always_inline void
15__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
16 uint32_t new_val, uint32_t single, uint32_t enqueue)
17{
18 RTE_SET_USED(enqueue);
19
20 /*
21 * If there are other enqueues/dequeues in progress that preceded us,
22 * we need to wait for them to complete
23 */
24 if (!single)
25 rte_wait_until_equal_32(&ht->tail, old_val, __ATOMIC_RELAXED);
26
27 /*
28 * R0: Establishes a synchronizing edge with load-acquire of
29 * cons_tail at A1 or prod_tail at A4.
30 * Ensures that memory effects by this thread on ring elements array
31 * is observed by a different thread of the other type.
32 */
33 __atomic_store_n(&ht->tail, new_val, __ATOMIC_RELEASE);
34}
35
/**
 * Reserve space in the ring for an enqueue by moving the producer head.
 *
 * In multi-producer mode (is_sp == 0) the head is advanced with a CAS
 * loop; in single-producer mode a plain store suffices.
 *
 * @param r            ring instance.
 * @param is_sp        non-zero for single-producer mode.
 * @param n            number of slots requested.
 * @param behavior     RTE_RING_QUEUE_FIXED: reserve exactly n or nothing;
 *                     otherwise reserve as many as are free, up to n.
 * @param old_head     output: head before the move (first slot to write).
 * @param new_head     output: head after the move.
 * @param free_entries output: number of free slots observed.
 * @return number of slots actually reserved, 0 if none.
 */
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t capacity = r->capacity;
	uint32_t cons_tail;
	unsigned int max = n;
	int success;

	/*
	 * A0: Establishes a synchronizing edge with R1.
	 * Ensure that this thread observes same values
	 * to cons_tail observed by the thread that
	 * updated r->prod.head.
	 * If not, an unsafe partial order may ensue.
	 */
	*old_head = __atomic_load_n(&r->prod.head, __ATOMIC_ACQUIRE);
	do {
		/* Reset n to the initial burst count; a failed CAS may
		 * have shrunk it on the previous iteration.
		 */
		n = max;

		/*
		 * A1: Establishes a synchronizing edge with R0.
		 * Ensures that other thread's memory effects on
		 * ring elements array is observed by the time
		 * this thread observes its tail update.
		 */
		cons_tail = __atomic_load_n(&r->cons.tail, __ATOMIC_ACQUIRE);

		/* The subtraction is done between two unsigned 32bits value
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > cons_tail). So 'free_entries' is always between 0
		 * and capacity (which is < size).
		 */
		*free_entries = (capacity + cons_tail - *old_head);

		/* check that we have enough room in ring */
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp)
			/* single producer: no other writer can race */
			r->prod.head = *new_head, success = 1;
		else
			/* on failure, *old_head is updated */
			/*
			 * R1/A2.
			 * R1: Establishes a synchronizing edge with A0 of a
			 * different thread.
			 * A2: Establishes a synchronizing edge with R1 of a
			 * different thread to observe same value for
			 * cons_tail observed by that thread on CAS failure
			 * (to retry with an updated *old_head).
			 */
			success = __atomic_compare_exchange_n(&r->prod.head,
					old_head, *new_head,
					0, __ATOMIC_RELEASE,
					__ATOMIC_ACQUIRE);
	} while (unlikely(success == 0));
	return n;
}
126
/**
 * Reserve entries in the ring for a dequeue by moving the consumer head.
 *
 * Mirror of __rte_ring_move_prod_head: in multi-consumer mode
 * (is_sc == 0) the head is advanced with a CAS loop; in
 * single-consumer mode a plain store suffices.
 *
 * @param r        ring instance.
 * @param is_sc    non-zero for single-consumer mode.
 * @param n        number of entries requested.
 * @param behavior RTE_RING_QUEUE_FIXED: take exactly n or nothing;
 *                 otherwise take as many as are available, up to n.
 * @param old_head output: head before the move (first slot to read).
 * @param new_head output: head after the move.
 * @param entries  output: number of filled entries observed.
 * @return number of entries actually reserved, 0 if none.
 */
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	uint32_t prod_tail;
	int success;

	/*
	 * A3: Establishes a synchronizing edge with R2.
	 * Ensure that this thread observes same values
	 * to prod_tail observed by the thread that
	 * updated r->cons.head.
	 * If not, an unsafe partial order may ensue.
	 */
	*old_head = __atomic_load_n(&r->cons.head, __ATOMIC_ACQUIRE);
	do {
		/* Restore n as it may change every loop */
		n = max;

		/*
		 * A4: Establishes a synchronizing edge with R0.
		 * Ensures that other thread's memory effects on
		 * ring elements array is observed by the time
		 * this thread observes its tail update.
		 */
		prod_tail = __atomic_load_n(&r->prod.tail, __ATOMIC_ACQUIRE);

		/* The subtraction is done between two unsigned 32bits value
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1.
		 */
		*entries = (prod_tail - *old_head);

		/* Set the actual entries for dequeue */
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc)
			/* single consumer: no other reader can race */
			r->cons.head = *new_head, success = 1;
		else
			/* on failure, *old_head will be updated */
			/*
			 * R2/A5.
			 * R2: Establishes a synchronizing edge with A3 of a
			 * different thread.
			 * A5: Establishes a synchronizing edge with R2 of a
			 * different thread to observe same value for
			 * prod_tail observed by that thread on CAS failure
			 * (to retry with an updated *old_head).
			 */
			success = __atomic_compare_exchange_n(&r->cons.head,
					old_head, *new_head,
					0, __ATOMIC_RELEASE,
					__ATOMIC_ACQUIRE);
	} while (unlikely(success == 0));
	return n;
}
215
216#endif /* _RTE_RING_C11_PVT_H_ */
#define unlikely(x)
#define RTE_SET_USED(x)
Definition: rte_common.h:135
#define __rte_always_inline
Definition: rte_common.h:255
static __rte_always_inline void rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int memorder)
Definition: rte_pause.h:95
rte_ring_queue_behavior
Definition: rte_ring_core.h:43
@ RTE_RING_QUEUE_FIXED
Definition: rte_ring_core.h:45
volatile uint32_t head
Definition: rte_ring_core.h:69
volatile uint32_t tail
Definition: rte_ring_core.h:70
uint32_t capacity