DPDK 22.07.0
rte_mcslock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_MCSLOCK_H_
#define _RTE_MCSLOCK_H_

/**
 * @file
 *
 * RTE MCS lock
 *
 * This file defines the main data structure and APIs for MCS queued lock.
 *
 * The MCS lock (proposed by John M. Mellor-Crummey and Michael L. Scott)
 * provides scalability by spinning on a CPU/thread local variable which
 * avoids expensive cache bouncings. It provides fairness by maintaining
 * a list of acquirers and passing the lock to each CPU/thread in the order
 * they acquired the lock.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_pause.h>
#include <rte_branch_prediction.h>

/**
 * The rte_mcslock_t type.
 */
typedef struct rte_mcslock {
	struct rte_mcslock *next;
	int locked; /* 1 if the queue is locked, 0 otherwise */
} rte_mcslock_t;

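/*
 * Usage sketch (illustrative note, not part of the original header): the
 * lock itself is an rte_mcslock_t pointer that starts out NULL, and each
 * acquiring CPU/thread must supply its own node.
 *
 *   rte_mcslock_t *p_ml = NULL;   // the lock; NULL means unlocked
 *   rte_mcslock_t ml_me;          // this thread's queue node
 *
 *   rte_mcslock_lock(&p_ml, &ml_me);
 *   // ... critical section ...
 *   rte_mcslock_unlock(&p_ml, &ml_me);
 */
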
/**
 * Take the MCS lock.
 *
 * @param msl
 *   A pointer to the pointer of a MCS lock.
 *   When the lock is initialized or declared, the msl pointer should be
 *   set to NULL.
 * @param me
 *   A pointer to a new node of MCS lock. Each CPU/thread acquiring the
 *   lock should use its 'own node'.
 */
static inline void
rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	rte_mcslock_t *prev;

	/* Init me node */
	__atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);
	__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

	/* If the queue is empty, the exchange operation is enough to acquire
	 * the lock. Hence, the exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node is
	 * visible to other CPUs/threads. Hence, the exchange operation requires
	 * release semantics as well.
	 */
	prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);
	if (likely(prev == NULL)) {
		/* Queue was empty, no further action required,
		 * proceed with lock taken.
		 */
		return;
	}
	/* The store to me->next above should also complete before the node is
	 * visible to the predecessor thread releasing the lock. Hence, the
	 * store to prev->next also requires release semantics. Note that, for
	 * example, on ARM, the release semantics in the exchange operation is
	 * not as strong as a release fence and is not sufficient to enforce
	 * the desired order here.
	 */
	__atomic_store_n(&prev->next, me, __ATOMIC_RELEASE);

	/* The while-load of me->locked should not move above the previous
	 * store to prev->next. Otherwise it will cause a deadlock. Need a
	 * store-load barrier.
	 */
	__atomic_thread_fence(__ATOMIC_ACQ_REL);
	/* If the lock has already been acquired, the thread first atomically
	 * places its node at the end of the queue and then proceeds
	 * to spin on me->locked until the previous lock holder resets
	 * me->locked using rte_mcslock_unlock().
	 */
	rte_wait_until_equal_32((uint32_t *)&me->locked, 0, __ATOMIC_ACQUIRE);
}
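
/*
 * Illustrative queue state (explanatory note, not from the original
 * source): thread T1 holds the lock and T2 has just queued behind it.
 *
 *   *msl == &T2.node              tail of the queue
 *   T1.node.next == &T2.node      linked by T2 after the exchange
 *   T2.node.locked == 1           T2 spins on its own cache line
 *
 * T1's rte_mcslock_unlock() follows T1.node.next and clears
 * T2.node.locked, handing the lock to T2 in FIFO order.
 */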

/**
 * Release the MCS lock.
 *
 * @param msl
 *   A pointer to the pointer of a MCS lock.
 * @param me
 *   A pointer to the node of MCS lock passed in rte_mcslock_lock.
 */
static inline void
rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	/* Check if there are more nodes in the queue. */
	if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {
		/* No, last member in the queue. */
		rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED);

		/* Release the lock by setting it to NULL */
		if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,
				__ATOMIC_RELEASE, __ATOMIC_RELAXED)))
			return;

		/* Speculative execution would be allowed to perform the
		 * load in the wait loop below first. This has the potential
		 * to cause a deadlock. Need a load barrier.
		 */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* More nodes added to the queue by other CPUs.
		 * Wait until the next pointer is set.
		 */
		uintptr_t *next;
		next = (uintptr_t *)&me->next;
		RTE_WAIT_UNTIL_MASKED(next, UINTPTR_MAX, !=, 0,
			__ATOMIC_RELAXED);
	}

	/* Pass lock to next waiter. */
	__atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);
}
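
/*
 * Explanatory note (not from the original source): between the relaxed
 * load of me->next returning NULL and the compare-exchange, another
 * thread may exchange its node into *msl. The compare-exchange then
 * fails because *msl no longer equals me, and the releasing thread
 * falls through to wait until the newcomer has linked itself via
 * me->next before passing the lock on.
 */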

/**
 * Try to take the lock.
 *
 * @param msl
 *   A pointer to the pointer of a MCS lock.
 * @param me
 *   A pointer to a new node of MCS lock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	/* Init me node */
	__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

	/* Try to lock */
	rte_mcslock_t *expected = NULL;

	/* The lock can be taken only when the queue is empty. Hence,
	 * the compare-exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node
	 * is visible to other CPUs/threads. Hence, the compare-exchange
	 * operation requires release semantics as well.
	 */
	return __atomic_compare_exchange_n(msl, &expected, me, 0,
			__ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}
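
/*
 * Illustrative trylock pattern (p_ml and ml_me as in the earlier
 * sketch; explanatory note, not part of the original header):
 *
 *   if (rte_mcslock_trylock(&p_ml, &ml_me)) {
 *           // ... critical section ...
 *           rte_mcslock_unlock(&p_ml, &ml_me);
 *   }
 *
 * rte_mcslock_is_locked(p_ml) returns nonzero as long as any thread
 * holds the lock or waits in the queue.
 */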

/**
 * Test if the lock is taken.
 *
 * @param msl
 *   A pointer to a MCS lock node.
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
static inline int
rte_mcslock_is_locked(rte_mcslock_t *msl)
{
	return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MCSLOCK_H_ */
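
A minimal end-to-end sketch of the API above (illustrative only, not part of rte_mcslock.h; the demo_* names are hypothetical): several worker lcores increment a shared counter, each bringing its own queue node.

#include <stdint.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mcslock.h>

static rte_mcslock_t *demo_lock; /* NULL-initialized, i.e. unlocked */
static uint64_t demo_counter;

static int
demo_worker(void *arg)
{
	rte_mcslock_t me; /* this lcore's queue node; only referenced
			   * between lock and unlock, so the stack is fine
			   */
	int i;

	RTE_SET_USED(arg);
	for (i = 0; i < 100000; i++) {
		rte_mcslock_lock(&demo_lock, &me);
		demo_counter++; /* critical section, serialized in FIFO order */
		rte_mcslock_unlock(&demo_lock, &me);
	}
	return 0;
}

/* In main(), after rte_eal_init() succeeds:
 *
 *   rte_eal_mp_remote_launch(demo_worker, NULL, SKIP_MAIN);
 *   rte_eal_mp_wait_lcore();
 */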