DPDK 25.03.0-rc1
rte_ring_c11_pvt.h
Go to the documentation of this file.
1/* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright (c) 2017,2018 HXT-semitech Corporation.
4 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
5 * Copyright (c) 2021 Arm Limited
6 * All rights reserved.
7 * Derived from FreeBSD's bufring.h
8 * Used as BSD-3 Licensed with permission from Kip Macy.
9 */
10
11#ifndef _RTE_RING_C11_PVT_H_
12#define _RTE_RING_C11_PVT_H_
13
/**
 * Publish a new tail index, making the slots just written (enqueue) or
 * just read (dequeue) visible to the opposite side of the ring.
 *
 * @param ht
 *   Head/tail pair of the side (producer or consumer) completing its update.
 * @param old_val
 *   Tail value this thread observed when it reserved its slots with
 *   __rte_ring_headtail_move_head().
 * @param new_val
 *   Tail value to publish (old_val plus the number of slots processed).
 * @param single
 *   Non-zero in single-producer/single-consumer mode: no concurrent
 *   updaters, so no need to wait for earlier reservations to complete.
 * @param enqueue
 *   Unused in the C11 implementation (kept for interface symmetry with
 *   the other rte_ring backends).
 */
static __rte_always_inline void
__rte_ring_update_tail(struct rte_ring_headtail *ht, uint32_t old_val,
		uint32_t new_val, uint32_t single, uint32_t enqueue)
{
	RTE_SET_USED(enqueue);

	/*
	 * If there are other enqueues/dequeues in progress that preceded us,
	 * we need to wait for them to complete.  Tails are published strictly
	 * in reservation order, so we spin (relaxed) until the tail equals
	 * the value we saw when we reserved our own slots.
	 */
	if (!single)
		rte_wait_until_equal_32((uint32_t *)(uintptr_t)&ht->tail, old_val,
			rte_memory_order_relaxed);

	/* Store-release pairs with the load-acquire of this tail in
	 * __rte_ring_headtail_move_head(): all preceding ring-slot
	 * accesses become visible before the new tail value does.
	 */
	rte_atomic_store_explicit(&ht->tail, new_val, rte_memory_order_release);
}
41
/**
 * Reserve up to @p n entries by advancing the head index of one side of
 * the ring (the common reservation step for both enqueue and dequeue).
 *
 * @param d
 *   Head/tail pair whose head is advanced (the side doing the operation).
 * @param s
 *   Opposite side's head/tail pair; its tail bounds the space available.
 * @param capacity
 *   Usable capacity of the ring (always less than the ring size).
 * @param is_st
 *   Non-zero in single-threaded mode: head is updated with a plain store
 *   instead of a compare-and-swap loop.
 * @param n
 *   Number of entries requested.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED: reserve exactly n or nothing; otherwise reserve
 *   as many as currently fit.
 * @param old_head
 *   Output: head value observed before the move.
 * @param new_head
 *   Output: head value after the move (*old_head + return value).
 * @param entries
 *   Output: number of entries available at the time of the move.
 * @return
 *   Number of entries actually reserved (0 if none could be).
 */
static __rte_always_inline unsigned int
__rte_ring_headtail_move_head(struct rte_ring_headtail *d,
	const struct rte_ring_headtail *s, uint32_t capacity,
	unsigned int is_st, unsigned int n,
	enum rte_ring_queue_behavior behavior,
	uint32_t *old_head, uint32_t *new_head, uint32_t *entries)
{
	uint32_t stail;
	int success;
	unsigned int max = n;

	/* Relaxed is sufficient here: the acquire fence below orders this
	 * load (and, on CAS retry, the updated value) before the tail load.
	 */
	*old_head = rte_atomic_load_explicit(&d->head,
			rte_memory_order_relaxed);
	do {
		/* Reset n to the initial burst count */
		n = max;

		/* Ensure the head is read before tail */
		rte_atomic_thread_fence(rte_memory_order_acquire);

		/* load-acquire synchronize with store-release of ht->tail
		 * in update_tail.
		 */
		stail = rte_atomic_load_explicit(&s->tail,
					rte_memory_order_acquire);

		/* The subtraction is done between two unsigned 32bits value
		 * (the result is always modulo 32 bits even if we have
		 * *old_head > s->tail). So 'entries' is always between 0
		 * and capacity (which is < size).
		 */
		*entries = (capacity + stail - *old_head);

		/* check that we have enough room in ring */
		if (unlikely(n > *entries))
			n = (behavior == RTE_RING_QUEUE_FIXED) ?
					0 : *entries;

		if (n == 0)
			return 0;

		*new_head = *old_head + n;
		if (is_st) {
			/* Single-threaded: no competing updaters, plain
			 * store is enough and the loop exits immediately.
			 */
			d->head = *new_head;
			success = 1;
		} else
			/* on failure, *old_head is updated to the current
			 * head and the loop retries; relaxed ordering is
			 * enough because the acquire fence at the top of
			 * the loop re-orders head against the tail reload.
			 */
			success = rte_atomic_compare_exchange_strong_explicit(
					&d->head, old_head, *new_head,
					rte_memory_order_relaxed,
					rte_memory_order_relaxed);
	} while (unlikely(success == 0));
	return n;
}
123
124#endif /* _RTE_RING_C11_PVT_H_ */
static void rte_atomic_thread_fence(rte_memory_order memorder)
#define unlikely(x)
#define RTE_SET_USED(x)
Definition: rte_common.h:226
#define __rte_always_inline
Definition: rte_common.h:452
static __rte_always_inline void rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, rte_memory_order memorder)
Definition: rte_pause.h:95
rte_ring_queue_behavior
Definition: rte_ring_core.h:40
@ RTE_RING_QUEUE_FIXED
Definition: rte_ring_core.h:42