DPDK  23.07.0
rte_rwlock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_RWLOCK_H_
#define _RTE_RWLOCK_H_

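/**
 * @file
 *
 * RTE Read-Write Locks
 *
 * This file defines an API for read-write locks. Such a lock allows
 * multiple readers to hold it in parallel, but at most one writer.
 * A pending writer blocks new readers, so writers are not starved.
 */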
#ifdef __cplusplus
extern "C" {
#endif

#include <errno.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_lock_annotations.h>
#include <rte_pause.h>

#define RTE_RWLOCK_WAIT  0x1    /* Writer is waiting */
#define RTE_RWLOCK_WRITE 0x2    /* Writer has the lock */
#define RTE_RWLOCK_MASK  (RTE_RWLOCK_WAIT | RTE_RWLOCK_WRITE)
                                /* Writer is waiting or has lock */
#define RTE_RWLOCK_READ  0x4    /* Reader increment */

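/**
 * The rte_rwlock_t counter layout: readers increment cnt by
 * RTE_RWLOCK_READ (4), a writer holding the lock sets the
 * RTE_RWLOCK_WRITE bit, and a writer waiting for readers to drain
 * sets the RTE_RWLOCK_WAIT bit.
 */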
typedef struct __rte_lockable {
        int32_t cnt;
} rte_rwlock_t;

#define RTE_RWLOCK_INITIALIZER { 0 }

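/**
 * Initialize the rwlock to an unlocked state.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */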
static inline void
rte_rwlock_init(rte_rwlock_t *rwl)
{
        rwl->cnt = 0;
}

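/**
 * Take a read lock. Loop until the lock is held.
 *
 * @note The lock must not already be held for writing by the calling
 * thread, or this call will spin forever waiting for itself.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */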
static inline void
rte_rwlock_read_lock(rte_rwlock_t *rwl)
        __rte_shared_lock_function(rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        while (1) {
                /* Wait while writer is present or pending */
                while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED)
                       & RTE_RWLOCK_MASK)
                        rte_pause();

                /* Try to get read lock */
                x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
                                       __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;

                /* If no writer, then acquire was successful */
                if (likely(!(x & RTE_RWLOCK_MASK)))
                        return;

                /* Lost race with writer, back out the change. */
                __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,
                                   __ATOMIC_RELAXED);
        }
}

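/**
 * Try to take a read lock.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 * @return
 *   0 if the lock was acquired, -EBUSY if a writer holds the lock or
 *   is waiting for it.
 */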
static inline int
rte_rwlock_read_trylock(rte_rwlock_t *rwl)
        __rte_shared_trylock_function(0, rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);

        /* fail if write lock is held or writer is pending */
        if (x & RTE_RWLOCK_MASK)
                return -EBUSY;

        /* Try to get read lock */
        x = __atomic_fetch_add(&rwl->cnt, RTE_RWLOCK_READ,
                               __ATOMIC_ACQUIRE) + RTE_RWLOCK_READ;

        /* Back out if writer raced in */
        if (unlikely(x & RTE_RWLOCK_MASK)) {
                __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ,
                                   __ATOMIC_RELEASE);

                return -EBUSY;
        }
        return 0;
}

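/**
 * Release a read lock.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */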
static inline void
rte_rwlock_read_unlock(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl)
        __rte_no_thread_safety_analysis
{
        __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_READ, __ATOMIC_RELEASE);
}

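/**
 * Try to take a write lock.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 * @return
 *   0 if the lock was acquired, -EBUSY otherwise.
 */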
static inline int
rte_rwlock_write_trylock(rte_rwlock_t *rwl)
        __rte_exclusive_trylock_function(0, rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
        /* x < RTE_RWLOCK_WRITE means no writer and no readers; at most
         * the WAIT bit is set, and x + RTE_RWLOCK_WRITE preserves it. */
        if (x < RTE_RWLOCK_WRITE &&
            __atomic_compare_exchange_n(&rwl->cnt, &x, x + RTE_RWLOCK_WRITE,
                                        1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return 0;
        else
                return -EBUSY;
}

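/**
 * Take a write lock. Loop until the lock is held.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */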
static inline void
rte_rwlock_write_lock(rte_rwlock_t *rwl)
        __rte_exclusive_lock_function(rwl)
        __rte_no_thread_safety_analysis
{
        int32_t x;

        while (1) {
                x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);

                /* No readers or writers? */
                if (likely(x < RTE_RWLOCK_WRITE)) {
                        /* Turn off RTE_RWLOCK_WAIT, turn on RTE_RWLOCK_WRITE */
                        if (__atomic_compare_exchange_n(&rwl->cnt, &x, RTE_RWLOCK_WRITE, 1,
                                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                                return;
                }

                /* Turn on writer wait bit */
                if (!(x & RTE_RWLOCK_WAIT))
                        __atomic_fetch_or(&rwl->cnt, RTE_RWLOCK_WAIT, __ATOMIC_RELAXED);

                /* Wait until no readers before trying again */
                while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > RTE_RWLOCK_WAIT)
                        rte_pause();
        }
}

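/**
 * Release a write lock.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 */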
static inline void
rte_rwlock_write_unlock(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl)
        __rte_no_thread_safety_analysis
{
        __atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_WRITE, __ATOMIC_RELEASE);
}

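/**
 * Test if the write lock is taken.
 *
 * @param rwl
 *   A pointer to the rwlock structure.
 * @return
 *   1 if the write lock is currently taken; 0 otherwise.
 */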
static inline int
rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
{
        if (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & RTE_RWLOCK_WRITE)
                return 1;

        return 0;
}

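/**
 * The rte_rwlock_*_tm() variants below attempt to run the critical
 * section as a hardware memory transaction (lock elision) and fall
 * back to taking the lock when that is unavailable or aborts.
 */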
static inline void
rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
        __rte_shared_lock_function(rwl);

static inline void
rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl);

static inline void
rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
        __rte_exclusive_lock_function(rwl);

static inline void
rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
        __rte_unlock_function(rwl);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RWLOCK_H_ */
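
As a usage reference, here is a minimal sketch (not part of the header itself; the counter and function names are illustrative): readers share the lock in parallel while the single writer gets exclusive access.

#include <stdint.h>
#include <rte_rwlock.h>

/* Illustrative shared state guarded by the rwlock. */
static rte_rwlock_t counter_lock = RTE_RWLOCK_INITIALIZER;
static uint64_t counter;

/* Readers may run in parallel under the shared lock. */
static uint64_t
read_counter(void)
{
        uint64_t v;

        rte_rwlock_read_lock(&counter_lock);
        v = counter;
        rte_rwlock_read_unlock(&counter_lock);
        return v;
}

/* The writer has exclusive access; a pending writer also holds off
 * new readers until it has acquired and released the lock. */
static void
bump_counter(void)
{
        rte_rwlock_write_lock(&counter_lock);
        counter++;
        rte_rwlock_write_unlock(&counter_lock);
}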