DPDK 21.02.0
rte_mcslock.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_MCSLOCK_H_
#define _RTE_MCSLOCK_H_

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_pause.h>
#include <rte_branch_prediction.h>

typedef struct rte_mcslock {
	struct rte_mcslock *next;
	int locked; /* 1 if the queue is locked, 0 otherwise */
} rte_mcslock_t;

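/* Illustrative note, not part of the original header: *msl always points
 * at the tail of the waiter queue, and each node's next pointer leads from
 * a holder/waiter to the waiter that arrived after it:
 *
 *	*msl ---------------------------------------+
 *	                                            v
 *	holder --next--> waiter1 --next--> waiter2 (tail, next == NULL)
 *
 * A NULL *msl means the lock is free.
 */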
static inline void
rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	rte_mcslock_t *prev;

	/* Init me node */
	__atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);
	__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

	/* If the queue is empty, the exchange operation is enough to acquire
	 * the lock. Hence, the exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node is
	 * visible to other CPUs/threads. Hence, the exchange operation requires
	 * release semantics as well.
	 */
	prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);
	if (likely(prev == NULL)) {
		/* Queue was empty, no further action required,
		 * proceed with lock taken.
		 */
		return;
	}
	/* The store to me->next above should also complete before the node is
	 * visible to the predecessor thread releasing the lock. Hence, the
	 * store to prev->next also requires release semantics. Note that, for
	 * example, on ARM, the release semantics in the exchange operation is
	 * not as strong as a release fence and is not sufficient to enforce
	 * the desired order here.
	 */
	__atomic_store_n(&prev->next, me, __ATOMIC_RELEASE);

	/* The while-load of me->locked should not move above the previous
	 * store to prev->next. Otherwise it will cause a deadlock. Need a
	 * store-load barrier.
	 */
	__atomic_thread_fence(__ATOMIC_ACQ_REL);
	/* If the lock has already been acquired, it first atomically
	 * places the node at the end of the queue and then proceeds
	 * to spin on me->locked until the previous lock holder resets
	 * me->locked in rte_mcslock_unlock().
	 */
	while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE))
		rte_pause();
}

static inline void
rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	/* Check if there are more nodes in the queue. */
	if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {
		/* No, last member in the queue. */
		rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED);

		/* Release the lock by setting it to NULL */
		if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,
				__ATOMIC_RELEASE, __ATOMIC_RELAXED)))
			return;

		/* Speculative execution would be allowed to read in the
		 * while-loop first. This has the potential to cause a
		 * deadlock. Need a load barrier.
		 */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* More nodes added to the queue by other CPUs.
		 * Wait until the next pointer is set.
		 */
		while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)
			rte_pause();
	}

	/* Pass lock to next waiter. */
	__atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);
}

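/* Illustrative usage sketch, not part of the DPDK header: a minimal
 * critical section guarded by an MCS lock. The queue node lives on the
 * caller's stack, so each lcore spins only on its own cache line; the
 * node must stay valid until rte_mcslock_unlock() returns. The lock
 * variable, counter and function name below are hypothetical.
 */
static rte_mcslock_t *counter_lock; /* shared tail pointer, NULL == unlocked */
static uint64_t counter;

static void
counter_increment(void)
{
	rte_mcslock_t me; /* this call's queue node */

	rte_mcslock_lock(&counter_lock, &me);
	counter++; /* critical section */
	rte_mcslock_unlock(&counter_lock, &me);
}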
static inline int
rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	/* Init me node */
	__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

	/* Try to lock */
	rte_mcslock_t *expected = NULL;

	/* The lock can be taken only when the queue is empty. Hence,
	 * the compare-exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node
	 * is visible to other CPUs/threads. Hence, the compare-exchange
	 * operation requires release semantics as well.
	 */
	return __atomic_compare_exchange_n(msl, &expected, me, 0,
			__ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}

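/* Illustrative usage sketch, not part of the DPDK header: opportunistic
 * locking with rte_mcslock_trylock(). Names are hypothetical. As with
 * rte_mcslock_lock(), the node must remain valid until the matching
 * unlock returns.
 */
static rte_mcslock_t *stats_lock;

static int
stats_try_update(uint64_t *stats)
{
	rte_mcslock_t me;

	if (!rte_mcslock_trylock(&stats_lock, &me))
		return 0; /* contended: caller may retry later or skip */

	(*stats)++; /* critical section */
	rte_mcslock_unlock(&stats_lock, &me);
	return 1;
}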
static inline int
rte_mcslock_is_locked(rte_mcslock_t *msl)
{
	return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);
}

#endif /* _RTE_MCSLOCK_H_ */
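
Taken together, a typical multi-lcore pattern looks like the sketch below. It is illustrative only: worker(), p_ml and shared_count are hypothetical names, modeled on the style of DPDK's own mcslock unit test. Each lcore passes its own stack-allocated queue node, and rte_mcslock_is_locked() can be consulted for diagnostics.

#include <rte_mcslock.h>
#include <rte_lcore.h>
#include <rte_launch.h>

static rte_mcslock_t *p_ml; /* shared lock: tail of the waiter queue */
static uint64_t shared_count;

static int
worker(__rte_unused void *arg)
{
	rte_mcslock_t ml_me; /* this lcore's queue node */

	rte_mcslock_lock(&p_ml, &ml_me);
	shared_count++;
	rte_mcslock_unlock(&p_ml, &ml_me);
	return 0;
}

/* Launched from main() after rte_eal_init(), e.g.:
 *	rte_eal_mp_remote_launch(worker, NULL, SKIP_MAIN);
 *	rte_eal_mp_wait_lcore();
 * While workers run, rte_mcslock_is_locked(p_ml) reports whether the
 * queue is non-empty, i.e. whether some lcore holds or waits for the lock.
 */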