DPDK 19.08.2 — rte_mcslock.h (MCS queued spinlock)
Generated API documentation page for this header file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Arm Limited
3  */
4 
5 #ifndef _RTE_MCSLOCK_H_
6 #define _RTE_MCSLOCK_H_
7 
22 #include <rte_lcore.h>
23 #include <rte_common.h>
24 #include <rte_pause.h>
25 
/**
 * Per-lcore queue node for the MCS lock. Each contender spins on its
 * own node's 'locked' flag, giving FIFO ordering and local spinning.
 * The lock itself is a pointer to the tail node of the waiter queue
 * (NULL when unlocked).
 */
typedef struct rte_mcslock {
	struct rte_mcslock *next; /**< Next waiter in the queue, NULL if last. */
	int locked; /**< 1 if the queue is locked (this node must wait), 0 otherwise. */
} rte_mcslock_t;
48 __rte_experimental
49 static inline void
51 {
52  rte_mcslock_t *prev;
53 
54  /* Init me node */
55  __atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);
56  __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);
57 
58  /* If the queue is empty, the exchange operation is enough to acquire
59  * the lock. Hence, the exchange operation requires acquire semantics.
60  * The store to me->next above should complete before the node is
61  * visible to other CPUs/threads. Hence, the exchange operation requires
62  * release semantics as well.
63  */
64  prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);
65  if (likely(prev == NULL)) {
66  /* Queue was empty, no further action required,
67  * proceed with lock taken.
68  */
69  return;
70  }
71  __atomic_store_n(&prev->next, me, __ATOMIC_RELAXED);
72 
73  /* The while-load of me->locked should not move above the previous
74  * store to prev->next. Otherwise it will cause a deadlock. Need a
75  * store-load barrier.
76  */
77  __atomic_thread_fence(__ATOMIC_ACQ_REL);
78  /* If the lock has already been acquired, it first atomically
79  * places the node at the end of the queue and then proceeds
80  * to spin on me->locked until the previous lock holder resets
81  * the me->locked using mcslock_unlock().
82  */
83  while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE))
84  rte_pause();
85 }
86 
98 __rte_experimental
99 static inline void
101 {
102  /* Check if there are more nodes in the queue. */
103  if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {
104  /* No, last member in the queue. */
105  rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED);
106 
107  /* Release the lock by setting it to NULL */
108  if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,
109  __ATOMIC_RELEASE, __ATOMIC_RELAXED)))
110  return;
111 
112  /* Speculative execution would be allowed to read in the
113  * while-loop first. This has the potential to cause a
114  * deadlock. Need a load barrier.
115  */
116  __atomic_thread_fence(__ATOMIC_ACQUIRE);
117  /* More nodes added to the queue by other CPUs.
118  * Wait until the next pointer is set.
119  */
120  while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)
121  rte_pause();
122  }
123 
124  /* Pass lock to next waiter. */
125  __atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);
126 }
127 
141 __rte_experimental
142 static inline int
144 {
145  /* Init me node */
146  __atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);
147 
148  /* Try to lock */
149  rte_mcslock_t *expected = NULL;
150 
151  /* The lock can be taken only when the queue is empty. Hence,
152  * the compare-exchange operation requires acquire semantics.
153  * The store to me->next above should complete before the node
154  * is visible to other CPUs/threads. Hence, the compare-exchange
155  * operation requires release semantics as well.
156  */
157  return __atomic_compare_exchange_n(msl, &expected, me, 0,
158  __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
159 }
160 
172 __rte_experimental
173 static inline int
175 {
176  return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);
177 }
178 
179 #endif /* _RTE_MCSLOCK_H_ */
#define likely(x)
static __rte_experimental int rte_mcslock_is_locked(rte_mcslock_t *msl)
Definition: rte_mcslock.h:174
static __rte_experimental void rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
Definition: rte_mcslock.h:100
struct rte_mcslock rte_mcslock_t
static __rte_experimental int rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)
Definition: rte_mcslock.h:143
static void rte_pause(void)
static __rte_experimental void rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
Definition: rte_mcslock.h:50