DPDK 24.03.0
rte_ring_c11_pvt.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017,2018 HXT-semitech Corporation.
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * Copyright (c) 2021 Arm Limited
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_C11_PVT_H_
#define _RTE_RING_C11_PVT_H_

static __rte_always_inline void
__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
		uint32_t new_val, uint32_t single, uint32_t enqueue)
{
	RTE_SET_USED(enqueue);

	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete.
	 */
	if (!single)
		rte_wait_until_equal_32((uint32_t *)(uintptr_t)&ht->tail, old_val,
			rte_memory_order_relaxed);

	rte_atomic_store_explicit(&ht->tail, new_val, rte_memory_order_release);
}

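/*
 * Illustration only, not part of the DPDK header: a sketch of why the
 * !single path above must wait. Assume two producers, A and B, have
 * already reserved adjacent slot ranges [0, 4) and [4, 8) through the
 * head-move function defined below, and each runs one of these
 * hypothetical helpers on its own thread.
 */
static void
demo_producer_a(struct rte_ring *r)
{
	/* A reserved [0, 4) and finished copying. Its tail update
	 * (0 -> 4) proceeds immediately, since prod.tail is already 0.
	 */
	__rte_ring_update_tail(&r->prod, 0, 4, 0, 1);
}

static void
demo_producer_b(struct rte_ring *r)
{
	/* B reserved [4, 8). Even if B finishes copying before A, this
	 * call spins in rte_wait_until_equal_32() until A has published
	 * prod.tail = 4, then store-releases prod.tail = 8. Tails thus
	 * advance strictly in reservation order.
	 */
	__rte_ring_update_tail(&r->prod, 4, 8, 0, 1);
}
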
static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *free_entries)
{
	const uint32_t capacity = r->capacity;
	uint32_t cons_tail;
	unsigned int max = n;
	int success;

	*old_head = rte_atomic_load_explicit(&r->prod.head, rte_memory_order_relaxed);
	do {
		/* Reset n to the initial burst count */
		n = max;

		/* Ensure the head is read before the tail */
		rte_atomic_thread_fence(rte_memory_order_acquire);

		/* This load-acquire synchronizes with the store-release of
		 * ht->tail in update_tail.
		 */
		cons_tail = rte_atomic_load_explicit(&r->cons.tail,
					rte_memory_order_acquire);

		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > cons_tail). So 'free_entries' is always between 0
		 * and capacity (which is < size).
		 */
		*free_entries = (capacity + cons_tail - *old_head);

		/* check that we have enough room in the ring */
		if (unlikely(n > *free_entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *free_entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_sp) {
			r->prod.head = *new_head;
			success = 1;
		} else
			/* on failure, *old_head is updated */
			success = rte_atomic_compare_exchange_strong_explicit(&r->prod.head,
					old_head, *new_head,
					rte_memory_order_relaxed,
					rte_memory_order_relaxed);
	} while (unlikely(success == 0));
	return n;
}

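/*
 * Illustration only, not part of the DPDK header: a minimal sketch of a
 * multi-producer enqueue built from the two primitives above.
 * demo_mp_enqueue() and its inline copy loop are hypothetical; the real
 * enqueue path copies objects through the __rte_ring_enqueue_elems()
 * helpers rather than indexing the data area directly.
 */
static unsigned int
demo_mp_enqueue(struct rte_ring *r, void * const *objs, unsigned int n)
{
	uint32_t old_head, new_head, free_entries;

	/* Phase 1: reserve a slot range by advancing prod.head. */
	n = __rte_ring_move_prod_head(r, 0, n, RTE_RING_QUEUE_FIXED,
			&old_head, &new_head, &free_entries);
	if (n == 0)
		return 0;

	/* Phase 2: copy objects into the reserved slots; the data area
	 * starts right after the ring header, and the mask wraps the index.
	 */
	for (uint32_t i = 0; i < n; i++)
		((void **)&r[1])[(old_head + i) & r->mask] = objs[i];

	/* Phase 3: publish. Waits for any earlier producer still copying
	 * (until prod.tail == old_head), then store-releases the new tail
	 * so consumers observe the copied objects.
	 */
	__rte_ring_update_tail(&r->prod, old_head, new_head, 0, 1);
	return n;
}
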
static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
		unsigned int n, enum rte_ring_queue_behavior behavior,
		uint32_t *old_head, uint32_t *new_head,
		uint32_t *entries)
{
	unsigned int max = n;
	uint32_t prod_tail;
	int success;

	/* move cons.head atomically */
	*old_head = rte_atomic_load_explicit(&r->cons.head, rte_memory_order_relaxed);
	do {
		/* Restore n, as it may change on every loop iteration */
		n = max;

		/* Ensure the head is read before the tail */
		rte_atomic_thread_fence(rte_memory_order_acquire);

		/* This load-acquire synchronizes with the store-release of
		 * ht->tail in update_tail.
		 */
		prod_tail = rte_atomic_load_explicit(&r->prod.tail,
					rte_memory_order_acquire);

		/* The subtraction is done between two unsigned 32-bit values
		 * (the result is always modulo 32 bits even if we have
		 * cons_head > prod_tail). So 'entries' is always between 0
		 * and size(ring)-1.
		 */
		*entries = (prod_tail - *old_head);

		/* Set the actual number of entries to dequeue */
		if (n > *entries)
			n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

		if (unlikely(n == 0))
			return 0;

		*new_head = *old_head + n;
		if (is_sc) {
			r->cons.head = *new_head;
			success = 1;
		} else
			/* on failure, *old_head will be updated */
			success = rte_atomic_compare_exchange_strong_explicit(&r->cons.head,
					old_head, *new_head,
					rte_memory_order_relaxed,
					rte_memory_order_relaxed);
	} while (unlikely(success == 0));
	return n;
}

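/*
 * Illustration only (hypothetical helper, not part of the DPDK header):
 * the dequeue path mirrors demo_mp_enqueue() above, pairing
 * __rte_ring_move_cons_head() with __rte_ring_update_tail() on the
 * consumer side; the inline copy loop again stands in for the real
 * __rte_ring_dequeue_elems() helpers.
 */
static unsigned int
demo_mc_dequeue(struct rte_ring *r, void **objs, unsigned int n)
{
	uint32_t old_head, new_head, entries;

	/* Reserve: advance cons.head past the entries we will read. */
	n = __rte_ring_move_cons_head(r, 0, n, RTE_RING_QUEUE_FIXED,
			&old_head, &new_head, &entries);
	if (n == 0)
		return 0;

	/* Copy the objects out of the ring. */
	for (uint32_t i = 0; i < n; i++)
		objs[i] = ((void **)&r[1])[(old_head + i) & r->mask];

	/* Publish: releases the slots back to producers once all earlier
	 * consumers have advanced cons.tail.
	 */
	__rte_ring_update_tail(&r->cons, old_head, new_head, 0, 0);
	return n;
}
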
#endif /* _RTE_RING_C11_PVT_H_ */
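
/*
 * Standalone check, illustration only (hypothetical values; compile
 * separately, not part of the header above): the head and tail indices
 * are free-running uint32_t counters, so the 'free_entries' and
 * 'entries' computations rely on modulo-2^32 arithmetic. The assert
 * below shows the formula stays correct even after the indices wrap
 * past UINT32_MAX.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t capacity = 1024;
	uint32_t old_head = 5;               /* producer head, has wrapped */
	uint32_t cons_tail = UINT32_MAX - 2; /* consumer tail, not yet wrapped */

	/* 8 entries are in flight: old_head - cons_tail == 8 (mod 2^32). */
	uint32_t free_entries = capacity + cons_tail - old_head;

	assert(free_entries == capacity - 8); /* 1016 */
	return 0;
}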