DPDK 19.08.2
rte_stack_lf_generic.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _RTE_STACK_LF_GENERIC_H_
#define _RTE_STACK_LF_GENERIC_H_

#include <rte_branch_prediction.h>
#include <rte_prefetch.h>

static __rte_always_inline unsigned int
__rte_stack_lf_count(struct rte_stack *s)
{
	/* stack_lf_push() and stack_lf_pop() do not update the list's contents
	 * and stack_lf->len atomically, which can cause the list to appear
	 * shorter than it actually is if this function is called while other
	 * threads are modifying the list.
	 *
	 * However, given the inherently approximate nature of the get_count
	 * callback -- even if the list and its size were updated atomically,
	 * the size could change between when get_count executes and when the
	 * value is returned to the caller -- this is acceptable.
	 *
	 * The stack_lf->len updates are placed such that the list may appear to
	 * have fewer elements than it does, but will never appear to have more
	 * elements. If the mempool is near-empty to the point that this is a
	 * concern, the user should consider increasing the mempool size.
	 */
	return (unsigned int)rte_atomic64_read((rte_atomic64_t *)
			&s->stack_lf.used.len);
}
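
/*
 * Illustrative sketch, not part of the original header: the value returned
 * above is only a hint -- concurrent pushes and pops can make it lag behind
 * the real list length -- so it suits statistics and heuristics rather than
 * correctness checks. A hypothetical helper (example_worth_bulk_pop() is a
 * made-up name) might use it to skip a bulk pop attempt that is unlikely to
 * succeed; the pop path below still re-checks list->len atomically.
 */
static inline int
example_worth_bulk_pop(struct rte_stack *s, unsigned int n)
{
	/* Approximate occupancy; may under-report elements that a concurrent
	 * push has linked into the list but not yet counted.
	 */
	return __rte_stack_lf_count(s) >= n;
}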

static __rte_always_inline void
__rte_stack_lf_push_elems(struct rte_stack_lf_list *list,
			  struct rte_stack_lf_elem *first,
			  struct rte_stack_lf_elem *last,
			  unsigned int num)
{
#ifndef RTE_ARCH_X86_64
	RTE_SET_USED(first);
	RTE_SET_USED(last);
	RTE_SET_USED(list);
	RTE_SET_USED(num);
#else
	struct rte_stack_lf_head old_head;
	int success;

	old_head = list->head;

	do {
		struct rte_stack_lf_head new_head;

		/* An acquire fence (or stronger) is needed for weak memory
		 * models to establish a synchronized-with relationship between
		 * the list->head load and store-release operations (as part of
		 * the rte_atomic128_cmp_exchange()).
		 */
		rte_smp_mb();

		/* Swing the top pointer to the first element in the list and
		 * make the last element point to the old top.
		 */
		new_head.top = first;
		new_head.cnt = old_head.cnt + 1;

		last->next = old_head.top;

		/* old_head is updated on failure */
		success = rte_atomic128_cmp_exchange(
				(rte_int128_t *)&list->head,
				(rte_int128_t *)&old_head,
				(rte_int128_t *)&new_head,
				1, __ATOMIC_RELEASE,
				__ATOMIC_RELAXED);
	} while (success == 0);

	rte_atomic64_add((rte_atomic64_t *)&list->len, num);
#endif
}
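
/*
 * Illustrative sketch, not part of the original header: the caller is
 * expected to pre-link the batch into a singly linked chain and pass in its
 * first and last nodes, so the loop above can publish the whole batch with a
 * single 128-bit compare-and-exchange. A made-up helper demonstrating that
 * contract (example_push_batch() and its array argument are illustrative
 * only):
 */
static inline void
example_push_batch(struct rte_stack_lf_list *list,
		   struct rte_stack_lf_elem *elems[],
		   unsigned int num)
{
	unsigned int i;

	if (num == 0)
		return;

	/* Chain elems[0] -> elems[1] -> ... -> elems[num - 1]; the last
	 * element's next pointer is filled in by the push itself.
	 */
	for (i = 0; i + 1 < num; i++)
		elems[i]->next = elems[i + 1];

	__rte_stack_lf_push_elems(list, elems[0], elems[num - 1], num);
}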

static __rte_always_inline struct rte_stack_lf_elem *
__rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
			 unsigned int num,
			 void **obj_table,
			 struct rte_stack_lf_elem **last)
{
#ifndef RTE_ARCH_X86_64
	RTE_SET_USED(obj_table);
	RTE_SET_USED(last);
	RTE_SET_USED(list);
	RTE_SET_USED(num);

	return NULL;
#else
	struct rte_stack_lf_head old_head;
	int success;

	/* Reserve num elements, if available */
	while (1) {
		uint64_t len = rte_atomic64_read((rte_atomic64_t *)&list->len);

		/* Does the list contain enough elements? */
		if (unlikely(len < num))
			return NULL;

		if (rte_atomic64_cmpset((volatile uint64_t *)&list->len,
					len, len - num))
			break;
	}

	old_head = list->head;

	/* Pop num elements */
	do {
		struct rte_stack_lf_head new_head;
		struct rte_stack_lf_elem *tmp;
		unsigned int i;

		/* An acquire fence (or stronger) is needed for weak memory
		 * models to ensure the LF LIFO element reads are properly
		 * ordered with respect to the head pointer read.
		 */
		rte_smp_mb();

		rte_prefetch0(old_head.top);

		tmp = old_head.top;

		/* Traverse the list to find the new head. A next pointer will
		 * either point to another element or NULL; if a thread
		 * encounters a pointer that has already been popped, the CAS
		 * will fail.
		 */
		for (i = 0; i < num && tmp != NULL; i++) {
			rte_prefetch0(tmp->next);
			if (obj_table)
				obj_table[i] = tmp->data;
			if (last)
				*last = tmp;
			tmp = tmp->next;
		}

		/* If NULL was encountered, the list was modified while
		 * traversing it. Retry.
		 */
		if (i != num)
			continue;

		new_head.top = tmp;
		new_head.cnt = old_head.cnt + 1;

		/* old_head is updated on failure */
		success = rte_atomic128_cmp_exchange(
				(rte_int128_t *)&list->head,
				(rte_int128_t *)&old_head,
				(rte_int128_t *)&new_head,
				1, __ATOMIC_RELEASE,
				__ATOMIC_RELAXED);
	} while (success == 0);

	return old_head.top;
#endif
}
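
/*
 * Illustrative sketch, not part of the original header: a successful pop
 * returns the old head of a chain of exactly num elements whose data
 * pointers have already been copied into obj_table, and *last points to the
 * final element of that chain so the caller can recycle it (e.g. push it
 * onto another list). A made-up caller (example_pop_batch() and its
 * parameters are illustrative only):
 */
static inline unsigned int
example_pop_batch(struct rte_stack_lf_list *list, void **objs, unsigned int num)
{
	struct rte_stack_lf_elem *first, *last = NULL;

	first = __rte_stack_lf_pop_elems(list, num, objs, &last);
	if (first == NULL)
		return 0; /* Fewer than num elements were available. */

	/* objs[0..num-1] now hold the user data; first..last is the element
	 * chain the caller is responsible for recycling.
	 */
	return num;
}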

#endif /* _RTE_STACK_LF_GENERIC_H_ */