DPDK 25.11.0-rc2
rte_ring_c11_pvt.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017,2018 HXT-semitech Corporation.
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * Copyright (c) 2021 Arm Limited
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_C11_PVT_H_
#define _RTE_RING_C11_PVT_H_

static __rte_always_inline void
__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
		uint32_t new_val, uint32_t single, uint32_t enqueue)
{
	RTE_SET_USED(enqueue);

	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete.
	 */
	if (!single)
		rte_wait_until_equal_32((uint32_t *)(uintptr_t)&ht->tail, old_val,
			rte_memory_order_relaxed);

	/*
	 * R0: Establishes a synchronizing edge with the load-acquire of tail at A1.
	 * Ensures that this thread's memory effects on the ring elements array
	 * are observed by a different thread of the other type.
	 */
	rte_atomic_store_explicit(&ht->tail, new_val, rte_memory_order_release);
}

static __rte_always_inline unsigned int
__rte_ring_headtail_move_head(struct rte_ring_headtail *d,
	const struct rte_ring_headtail *s, uint32_t capacity,
	unsigned int is_st, unsigned int n,
	enum rte_ring_queue_behavior behavior,
	uint32_t *old_head, uint32_t *new_head, uint32_t *entries)
{
	uint32_t stail;
	int success;
	unsigned int max = n;

	/*
	 * A0: Establishes a synchronizing edge with R1.
	 * Ensures that this thread observes the same value
	 * of stail as observed by the thread that updated
	 * d->head.
	 * If not, an unsafe partial order may ensue.
	 */
	*old_head = rte_atomic_load_explicit(&d->head,
			rte_memory_order_acquire);
	do {
		/* Reset n to the initial burst count */
		n = max;

		/*
		 * A1: Establishes a synchronizing edge with R0.
		 * Ensures that the other thread's memory effects on
		 * the ring elements array are observed by the time
		 * this thread observes its tail update.
		 */
		stail = rte_atomic_load_explicit(&s->tail,
				rte_memory_order_acquire);

		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > s->tail). So 'entries' is always between 0
		 * and capacity (which is < size).
		 */
		*entries = (capacity + stail - *old_head);
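		/*
		 * Worked example of the wraparound arithmetic above (not in
		 * the upstream source): with capacity = 4096, stail =
		 * 0xFFFFFFFE and *old_head = 3 (the head index has wrapped
		 * past 2^32 while the tail has not), the 32-bit result is
		 * 4096 + 0xFFFFFFFE - 3 == 4091, i.e. capacity minus the
		 * 5 entries still in use.
		 */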

		/* check that we have enough room in ring */
		if (unlikely(n > *entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_st) {
			d->head = *new_head;
			success = 1;
		} else
			/* on failure, *old_head is updated */
			/*
			 * R1/A2.
			 * R1: Establishes a synchronizing edge with A0 of a
			 * different thread.
			 * A2: Establishes a synchronizing edge with R1 of a
			 * different thread, to observe the same value of stail
			 * as observed by that thread on CAS failure (so the
			 * retry uses an updated *old_head).
			 */
			success = rte_atomic_compare_exchange_strong_explicit(
					&d->head, old_head, *new_head,
					rte_memory_order_release,
					rte_memory_order_acquire);
	} while (unlikely(success == 0));
	return n;
}

#endif /* _RTE_RING_C11_PVT_H_ */
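
For context, an enqueue path built on these helpers follows three steps: reserve space by moving the producer head, copy the objects into the reserved slots, and publish them by updating the producer tail (the release store R0 that pairs with the consumer's acquire load A1). The sketch below only illustrates that ordering for a single producer against a plain pointer array; the names sp_enqueue_burst, ring_slots and ring_size are illustrative assumptions, not part of the DPDK API, and the real composition lives in the rte_ring_elem_pvt.h wrappers.

/*
 * Illustrative sketch only: shows the head-move / copy / tail-update
 * ordering, not the actual DPDK enqueue implementation.
 * Assumes ring_size is a power of two so indices can be masked.
 */
static inline unsigned int
sp_enqueue_burst(struct rte_ring_headtail *prod,
		const struct rte_ring_headtail *cons,
		void **ring_slots, uint32_t ring_size, uint32_t capacity,
		void * const *objs, unsigned int n)
{
	uint32_t old_head, new_head, free_entries;
	unsigned int i;

	/* Step 1: reserve up to n slots (single producer, variable burst). */
	n = __rte_ring_headtail_move_head(prod, cons, capacity,
			1 /* is_st */, n, RTE_RING_QUEUE_VARIABLE,
			&old_head, &new_head, &free_entries);
	if (n == 0)
		return 0;

	/* Step 2: fill the reserved slots; indices wrap modulo ring_size. */
	for (i = 0; i < n; i++)
		ring_slots[(old_head + i) & (ring_size - 1)] = objs[i];

	/* Step 3: publish. The release store in __rte_ring_update_tail makes
	 * the writes above visible to a consumer that load-acquires prod->tail.
	 */
	__rte_ring_update_tail(prod, old_head, new_head, 1, 1);
	return n;
}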