DPDK 25.03.0-rc0
rte_rwlock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_RWLOCK_H_
#define _RTE_RWLOCK_H_

#include <errno.h>

#include <rte_common.h>
#include <rte_lock_annotations.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_RWLOCK_WAIT  0x1    /* Writer is waiting */
#define RTE_RWLOCK_WRITE 0x2    /* Writer has the lock */
#define RTE_RWLOCK_MASK  (RTE_RWLOCK_WAIT | RTE_RWLOCK_WRITE)
                         /* Writer is waiting or has lock */
#define RTE_RWLOCK_READ  0x4    /* Reader increment */

typedef struct __rte_lockable {
        RTE_ATOMIC(int32_t) cnt;
} rte_rwlock_t;

#define RTE_RWLOCK_INITIALIZER { 0 }

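/*
 * Encoding sketch (illustrative, derived from the flag values above): the
 * two low bits of cnt track the writer state and each reader adds
 * RTE_RWLOCK_READ, so the reader count is cnt >> 2. For example:
 *
 *   cnt == 0                          unlocked
 *   cnt == RTE_RWLOCK_WRITE           write lock held          (cnt == 2)
 *   cnt == 3 * RTE_RWLOCK_READ        three readers            (cnt == 12)
 *   cnt == (3 * RTE_RWLOCK_READ) | RTE_RWLOCK_WAIT
 *                                     three readers, a writer
 *                                     is waiting               (cnt == 13)
 */
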
static inline void
rte_rwlock_init(rte_rwlock_t *rwl)
{
        rwl->cnt = 0;
}

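/*
 * Initialization sketch (illustrative; table_lock and table_setup are
 * hypothetical names): a lock can be initialized statically with
 * RTE_RWLOCK_INITIALIZER or dynamically with rte_rwlock_init(); both
 * leave cnt at 0, i.e. unlocked.
 *
 *   static rte_rwlock_t table_lock = RTE_RWLOCK_INITIALIZER;
 *
 *   void table_setup(rte_rwlock_t *lock)
 *   {
 *           rte_rwlock_init(lock);
 *   }
 */
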
static inline void
rte_rwlock_read_lock(rte_rwlock_t *rwl)
        __rte_shared_lock_function(rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        while (1) {
                /* Wait while a writer is present or pending */
                while (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed)
                       & RTE_RWLOCK_MASK)
                        rte_pause();

                /* Try to get the read lock */
                x = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
                        rte_memory_order_acquire) + RTE_RWLOCK_READ;

                /* If there is no writer, the acquire succeeded */
                if (likely(!(x & RTE_RWLOCK_MASK)))
                        return;

                /* Lost the race with a writer, back out the change */
                rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
                        rte_memory_order_relaxed);
        }
}

static inline int
rte_rwlock_read_trylock(rte_rwlock_t *rwl)
        __rte_shared_trylock_function(0, rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);

        /* Fail if the write lock is held or a writer is pending */
        if (x & RTE_RWLOCK_MASK)
                return -EBUSY;

        /* Try to get the read lock */
        x = rte_atomic_fetch_add_explicit(&rwl->cnt, RTE_RWLOCK_READ,
                rte_memory_order_acquire) + RTE_RWLOCK_READ;

        /* Back out if a writer raced in */
        if (unlikely(x & RTE_RWLOCK_MASK)) {
                rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ,
                        rte_memory_order_release);

                return -EBUSY;
        }
        return 0;
}

static inline void
rte_rwlock_read_unlock(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl)
        __rte_no_thread_safety_analysis
{
        rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_READ, rte_memory_order_release);
}

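/*
 * Reader-side usage sketch (illustrative; read_entry() and lookup() are
 * hypothetical names): take the lock shared around a read-only critical
 * section, or use the trylock variant, which returns -EBUSY instead of
 * spinning when a writer holds the lock or is pending.
 *
 *   static rte_rwlock_t table_lock = RTE_RWLOCK_INITIALIZER;
 *
 *   int read_entry(int key, int *value)
 *   {
 *           if (rte_rwlock_read_trylock(&table_lock) != 0)
 *                   return -EBUSY;
 *           *value = lookup(key);
 *           rte_rwlock_read_unlock(&table_lock);
 *           return 0;
 *   }
 */
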
static inline int
rte_rwlock_write_trylock(rte_rwlock_t *rwl)
        __rte_exclusive_trylock_function(0, rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);
        if (x < RTE_RWLOCK_WRITE &&
            rte_atomic_compare_exchange_weak_explicit(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,
                rte_memory_order_acquire, rte_memory_order_relaxed))
                return 0;
        else
                return -EBUSY;
}

static inline void
rte_rwlock_write_lock(rte_rwlock_t *rwl)
        __rte_exclusive_lock_function(rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        while (1) {
                x = rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed);

                /* No readers or writers? */
                if (likely(x < RTE_RWLOCK_WRITE)) {
                        /* Turn off RTE_RWLOCK_WAIT, turn on RTE_RWLOCK_WRITE */
                        if (rte_atomic_compare_exchange_weak_explicit(&rwl->cnt, &x,
                                RTE_RWLOCK_WRITE, rte_memory_order_acquire,
                                rte_memory_order_relaxed))
                                return;
                }

                /* Turn on the writer wait bit */
                if (!(x & RTE_RWLOCK_WAIT))
                        rte_atomic_fetch_or_explicit(&rwl->cnt, RTE_RWLOCK_WAIT,
                                rte_memory_order_relaxed);

                /* Wait until there are no readers before trying again */
                while (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed)
                       > RTE_RWLOCK_WAIT)
                        rte_pause();

        }
}

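/*
 * Note (summary of the code above): once a waiting writer has set
 * RTE_RWLOCK_WAIT, new readers spin in rte_rwlock_read_lock() because the
 * bit is part of RTE_RWLOCK_MASK, so the lock is writer-preferring; the
 * writer retries as soon as cnt drops to at most RTE_RWLOCK_WAIT, i.e.
 * when no readers and no other writer remain.
 */
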
static inline void
rte_rwlock_write_unlock(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl)
        __rte_no_thread_safety_analysis
{
        rte_atomic_fetch_sub_explicit(&rwl->cnt, RTE_RWLOCK_WRITE, rte_memory_order_release);
}

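/*
 * Writer-side usage sketch (illustrative; write_entry() and update() are
 * hypothetical names): take the lock exclusively around any critical
 * section that modifies the protected data.
 *
 *   static rte_rwlock_t table_lock = RTE_RWLOCK_INITIALIZER;
 *
 *   void write_entry(int key, int value)
 *   {
 *           rte_rwlock_write_lock(&table_lock);
 *           update(key, value);
 *           rte_rwlock_write_unlock(&table_lock);
 *   }
 */
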
static inline int
rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
{
        if (rte_atomic_load_explicit(&rwl->cnt, rte_memory_order_relaxed) & RTE_RWLOCK_WRITE)
                return 1;

        return 0;
}

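/*
 * Usage sketch (illustrative; modify_locked() is a hypothetical name):
 * rte_rwlock_write_is_locked() only reports whether some writer holds the
 * lock, so it is mainly useful for assertions in functions that must be
 * called with the lock already held, e.g. with RTE_ASSERT() from
 * <rte_debug.h>.
 *
 *   void modify_locked(rte_rwlock_t *lock)
 *   {
 *           RTE_ASSERT(rte_rwlock_write_is_locked(lock));
 *           ... mutate the protected state ...
 *   }
 */
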
static inline void
rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
        __rte_shared_lock_function(rwl);

static inline void
rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl);

static inline void
rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
        __rte_exclusive_lock_function(rwl);

static inline void
rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl);

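/*
 * Note on the _tm variants (hedged summary; their definitions live in the
 * per-architecture headers, not in this file): where hardware transactional
 * memory is available they attempt to elide the lock and fall back to the
 * plain lock/unlock path otherwise, so they are called exactly like the
 * non-_tm API:
 *
 *   rte_rwlock_read_lock_tm(&table_lock);
 *   ... read-only critical section ...
 *   rte_rwlock_read_unlock_tm(&table_lock);
 */
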
#ifdef __cplusplus
}
#endif

#endif /* _RTE_RWLOCK_H_ */