rte_seqcount.h (DPDK 24.07.0)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Ericsson AB
 */

#ifndef _RTE_SEQCOUNT_H_
#define _RTE_SEQCOUNT_H_

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @file
 * RTE Seqcount
 *
 * The sequence counter (seqcount) synchronizes a single writer with
 * multiple, parallel readers.
 */

#include <stdbool.h>
#include <stdint.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_stdatomic.h>
/**
 * The RTE seqcount type.
 */
typedef struct {
	RTE_ATOMIC(uint32_t) sn; /**< A sequence number for the protected data. */
} rte_seqcount_t;

/**
 * A static seqcount initializer.
 */
#define RTE_SEQCOUNT_INITIALIZER { .sn = 0 }

/**
 * Initialize the sequence counter.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 */
static inline void
rte_seqcount_init(rte_seqcount_t *seqcount)
{
	seqcount->sn = 0;
}

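/*
 * Illustrative sketch (not part of the original header): a seqcount may
 * be initialized statically with RTE_SEQCOUNT_INITIALIZER or at run time
 * with rte_seqcount_init(). The struct shared_data and its fields are
 * hypothetical, standing in for whatever data the counter protects.
 *
 *   static rte_seqcount_t static_sc = RTE_SEQCOUNT_INITIALIZER;
 *
 *   struct shared_data {
 *           rte_seqcount_t sc;
 *           uint64_t a;
 *           uint64_t b;
 *   };
 *
 *   static void
 *   shared_data_init(struct shared_data *sd)
 *   {
 *           rte_seqcount_init(&sd->sc);
 *           sd->a = 0;
 *           sd->b = 0;
 *   }
 */
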
/**
 * Begin a read-side critical section.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 * @return
 *   The seqcount sequence number for this critical section, to be
 *   passed to a later rte_seqcount_read_retry() call.
 */
static inline uint32_t
rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
{
	/* rte_memory_order_acquire to prevent loads after (in program order)
	 * from happening before the sn load. Synchronizes-with the
	 * store release in rte_seqcount_write_end().
	 */
	return rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_acquire);
}

/**
 * End a read-side critical section.
 *
 * Checks if the just-finished read critical section raced with a
 * writer, in which case the loads of the protected data must be
 * retried.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 * @param begin_sn
 *   The sequence number returned by rte_seqcount_read_begin().
 * @return
 *   true if the just-read protected data may be inconsistent and the
 *   reads must be retried, false otherwise.
 */
static inline bool
rte_seqcount_read_retry(const rte_seqcount_t *seqcount, uint32_t begin_sn)
{
	uint32_t end_sn;

	/* An odd sequence number means the protected data was already
	 * being modified at the point of the rte_seqcount_read_begin()
	 * call.
	 */
	if (unlikely(begin_sn & 1))
		return true;

	/* Make sure the data loads happen before the sn load. */
	rte_atomic_thread_fence(rte_memory_order_acquire);

	end_sn = rte_atomic_load_explicit(&seqcount->sn, rte_memory_order_relaxed);

	/* A writer incremented the sequence number during this read
	 * critical section.
	 */
	return begin_sn != end_sn;
}

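/*
 * Illustrative sketch (not part of the original header): the reader-side
 * retry loop, using the hypothetical struct shared_data from the sketch
 * above. The loads are retried until rte_seqcount_read_retry() reports
 * that no writer was active during the read critical section.
 *
 *   static void
 *   shared_data_read(const struct shared_data *sd, uint64_t *a, uint64_t *b)
 *   {
 *           uint32_t sn;
 *
 *           do {
 *                   sn = rte_seqcount_read_begin(&sd->sc);
 *
 *                   *a = sd->a;
 *                   *b = sd->b;
 *           } while (rte_seqcount_read_retry(&sd->sc, sn));
 *   }
 */
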
/**
 * Begin a write-side critical section.
 *
 * After this call, and until rte_seqcount_write_end() is called, the
 * sequence number is odd, causing any parallel readers to retry.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 */
static inline void
rte_seqcount_write_begin(rte_seqcount_t *seqcount)
{
	uint32_t sn;

	sn = seqcount->sn + 1;

	rte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_relaxed);

	/* rte_memory_order_release to prevent stores after (in program order)
	 * from happening before the sn store.
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
}

/**
 * End a write-side critical section.
 *
 * Restores the sequence number to an even state, marking the end of
 * the write critical section.
 *
 * @param seqcount
 *   A pointer to the sequence counter.
 */
static inline void
rte_seqcount_write_end(rte_seqcount_t *seqcount)
{
	uint32_t sn;

	sn = seqcount->sn + 1;

	/* Synchronizes-with the load acquire in rte_seqcount_read_begin(). */
	rte_atomic_store_explicit(&seqcount->sn, sn, rte_memory_order_release);
}

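/*
 * Illustrative sketch (not part of the original header): the writer side,
 * again using the hypothetical struct shared_data. The seqcount itself
 * tolerates only a single writer at a time; serializing multiple writers
 * (e.g., with a spinlock) is out of scope for this sketch.
 *
 *   static void
 *   shared_data_update(struct shared_data *sd, uint64_t a, uint64_t b)
 *   {
 *           rte_seqcount_write_begin(&sd->sc);
 *
 *           sd->a = a;
 *           sd->b = b;
 *
 *           rte_seqcount_write_end(&sd->sc);
 *   }
 */
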
#ifdef __cplusplus
}
#endif

#endif /* _RTE_SEQCOUNT_H_ */