DPDK 24.11.0-rc3
rte_mcslock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_MCSLOCK_H_
#define _RTE_MCSLOCK_H_

/**
 * @file
 *
 * RTE MCS lock
 *
 * This file defines the main data structure and APIs for the MCS queued
 * lock. The MCS lock (Mellor-Crummey and Scott) scales by letting each
 * waiter spin on its own CPU/thread-local flag instead of a shared cache
 * line, and it grants the lock in FIFO order.
 */

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_pause.h>
#include <rte_branch_prediction.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * The rte_mcslock_t type.
 */
typedef struct rte_mcslock {
	RTE_ATOMIC(struct rte_mcslock *) next;
	RTE_ATOMIC(int) locked; /* 1 if the queue is locked, 0 otherwise */
} rte_mcslock_t;

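/*
 * Illustrative declaration (not part of the original header): an MCS lock is
 * just the atomic tail pointer of a queue of rte_mcslock_t nodes and is NULL
 * while the lock is free; every locking thread supplies its own node when it
 * calls the functions below. The variable name is an example only.
 */
static RTE_ATOMIC(rte_mcslock_t *) example_lock __rte_unused = NULL;
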
/**
 * Take the MCS lock. Each CPU/thread acquiring the lock must pass its own
 * queue node 'me'; the node is appended to the queue and the caller spins
 * on its local 'locked' flag until the predecessor hands the lock over.
 */
static inline void
rte_mcslock_lock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me)
{
	rte_mcslock_t *prev;

	/* Init me node */
	rte_atomic_store_explicit(&me->locked, 1, rte_memory_order_relaxed);
	rte_atomic_store_explicit(&me->next, NULL, rte_memory_order_relaxed);

	/* If the queue is empty, the exchange operation is enough to acquire
	 * the lock. Hence, the exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node is
	 * visible to other CPUs/threads. Hence, the exchange operation requires
	 * release semantics as well.
	 */
	prev = rte_atomic_exchange_explicit(msl, me, rte_memory_order_acq_rel);
	if (likely(prev == NULL)) {
		/* Queue was empty, no further action required,
		 * proceed with lock taken.
		 */
		return;
	}
	/* The store to me->next above should also complete before the node is
	 * visible to the predecessor thread releasing the lock. Hence, the
	 * store to prev->next also requires release semantics. Note that, for
	 * example, on ARM, the release semantics in the exchange operation is
	 * not as strong as a release fence and is not sufficient to enforce
	 * the desired order here.
	 */
	rte_atomic_store_explicit(&prev->next, me, rte_memory_order_release);

	/* The while-load of me->locked should not move above the previous
	 * store to prev->next. Otherwise it will cause a deadlock. Need a
	 * store-load barrier.
	 */
	rte_atomic_thread_fence(rte_memory_order_acq_rel);
	/* If the lock has already been acquired, it first atomically
	 * places the node at the end of the queue and then proceeds
	 * to spin on me->locked until the previous lock holder resets
	 * me->locked using rte_mcslock_unlock().
	 */
	rte_wait_until_equal_32((uint32_t *)(uintptr_t)&me->locked, 0, rte_memory_order_acquire);
}

/**
 * Release the MCS lock: if other threads are queued behind this node,
 * hand the lock to the immediate successor by clearing its 'locked' flag.
 */
static inline void
rte_mcslock_unlock(RTE_ATOMIC(rte_mcslock_t *) *msl, RTE_ATOMIC(rte_mcslock_t *) me)
{
	/* Check if there are more nodes in the queue. */
	if (likely(rte_atomic_load_explicit(&me->next, rte_memory_order_relaxed) == NULL)) {
		/* No, last member in the queue. */
		rte_mcslock_t *save_me = rte_atomic_load_explicit(&me, rte_memory_order_relaxed);

		/* Release the lock by setting it to NULL */
		if (likely(rte_atomic_compare_exchange_strong_explicit(msl, &save_me, NULL,
				rte_memory_order_release, rte_memory_order_relaxed)))
			return;

		/* Speculative execution would be allowed to read in the
		 * while-loop first. This has the potential to cause a
		 * deadlock. Need a load barrier.
		 */
		rte_atomic_thread_fence(rte_memory_order_acquire);
		/* More nodes added to the queue by other CPUs.
		 * Wait until the next pointer is set.
		 */
		RTE_ATOMIC(uintptr_t) *next;
		next = (__rte_atomic uintptr_t *)&me->next;
		RTE_WAIT_UNTIL_MASKED(next, UINTPTR_MAX, !=, 0, rte_memory_order_relaxed);
	}

	/* Pass lock to next waiter. */
	rte_atomic_store_explicit(&me->next->locked, 0, rte_memory_order_release);
}
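
/*
 * Usage sketch (illustrative, not part of the original header): a critical
 * section guarded by an MCS lock. The queue node 'me' belongs to the calling
 * thread and must stay valid until rte_mcslock_unlock() returns, so a stack
 * variable is sufficient. All names below are examples only.
 */
static inline void
example_mcslock_critical_section(RTE_ATOMIC(rte_mcslock_t *) *msl)
{
	rte_mcslock_t me;	/* per-thread queue node */

	rte_mcslock_lock(msl, &me);
	/* ... access the shared data protected by the lock ... */
	rte_mcslock_unlock(msl, &me);
}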

/**
 * Try to take the MCS lock once, without queueing or spinning.
 *
 * @return
 *   1 if the lock was taken successfully, 0 if it is already held.
 */
static inline int
rte_mcslock_trylock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me)
{
	/* Init me node */
	rte_atomic_store_explicit(&me->next, NULL, rte_memory_order_relaxed);

	/* Try to lock */
	rte_mcslock_t *expected = NULL;

	/* The lock can be taken only when the queue is empty. Hence,
	 * the compare-exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node
	 * is visible to other CPUs/threads. Hence, the compare-exchange
	 * operation requires release semantics as well.
	 */
	return rte_atomic_compare_exchange_strong_explicit(msl, &expected, me,
			rte_memory_order_acq_rel, rte_memory_order_relaxed);
}
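
/*
 * Usage sketch (illustrative, not part of the original header): a
 * non-blocking attempt that backs off instead of queueing when the lock is
 * contended. All names below are examples only.
 */
static inline int
example_mcslock_try_update(RTE_ATOMIC(rte_mcslock_t *) *msl)
{
	rte_mcslock_t me;	/* per-thread queue node */

	if (rte_mcslock_trylock(msl, &me) == 0)
		return 0;	/* lock busy, let the caller retry later */

	/* ... update the shared data protected by the lock ... */
	rte_mcslock_unlock(msl, &me);
	return 1;
}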

/**
 * Test whether the MCS lock is taken, i.e. whether the waiter queue is
 * non-empty.
 *
 * @return
 *   1 if the lock is currently taken, 0 otherwise.
 */
static inline int
rte_mcslock_is_locked(RTE_ATOMIC(rte_mcslock_t *) msl)
{
	return (rte_atomic_load_explicit(&msl, rte_memory_order_relaxed) != NULL);
}
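
/*
 * Usage sketch (illustrative, not part of the original header): because
 * rte_mcslock_is_locked() only reads the queue tail pointer, the answer is a
 * snapshot that may already be stale when the caller acts on it, so it suits
 * diagnostics rather than decisions that need mutual exclusion. The function
 * name below is an example only.
 */
static inline int
example_mcslock_contended(RTE_ATOMIC(rte_mcslock_t *) msl)
{
	/* A non-NULL tail pointer means at least one holder or waiter is queued. */
	return rte_mcslock_is_locked(msl);
}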

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MCSLOCK_H_ */