DPDK  21.02.0
rte_ring_peek.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2010-2020 Intel Corporation
4  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
5  * All rights reserved.
6  * Derived from FreeBSD's bufring.h
7  * Used as BSD-3 Licensed with permission from Kip Macy.
8  */
9 
10 #ifndef _RTE_RING_PEEK_H_
11 #define _RTE_RING_PEEK_H_
12 
47 #ifdef __cplusplus
48 extern "C" {
49 #endif
50 
51 #include <rte_ring_peek_elem_pvt.h>
52 
70 __rte_experimental
71 static __rte_always_inline unsigned int
72 rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n,
73  unsigned int *free_space)
74 {
75  return __rte_ring_do_enqueue_start(r, n, RTE_RING_QUEUE_FIXED,
76  free_space);
77 }
78 
96 __rte_experimental
97 static __rte_always_inline unsigned int
98 rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n,
99  unsigned int *free_space)
100 {
101  return rte_ring_enqueue_bulk_elem_start(r, n, free_space);
102 }
103 
121 __rte_experimental
122 static __rte_always_inline unsigned int
123 rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n,
124  unsigned int *free_space)
125 {
126  return __rte_ring_do_enqueue_start(r, n, RTE_RING_QUEUE_VARIABLE,
127  free_space);
128 }
129 
147 __rte_experimental
148 static __rte_always_inline unsigned int
149 rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n,
150  unsigned int *free_space)
151 {
152  return rte_ring_enqueue_burst_elem_start(r, n, free_space);
153 }
154 
/**
 * Complete an enqueue started with rte_ring_enqueue_{bulk,burst}_elem_start():
 * copy @p n objects into the previously reserved slots and publish them
 * by advancing the producer tail.
 *
 * Only single-thread (ST) and head/tail-sync (MT_HTS) producer modes are
 * supported; classic MT and RTS producers assert. @p n must not exceed
 * the count returned by the matching *_start() call — presumably enforced
 * inside the __rte_ring_*_get_tail helpers; TODO confirm.
 *
 * @param r
 *   Ring the enqueue was started on.
 * @param obj_table
 *   Array of objects to store; read as @p n elements of @p esize bytes.
 * @param esize
 *   Size of one element in bytes.
 * @param n
 *   Number of objects to actually enqueue (may be fewer than reserved).
 */
__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table,
	unsigned int esize, unsigned int n)
{
	uint32_t tail;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		/* Fetch the publish position first, then copy, then make the
		 * elements visible — this order is load-bearing: set_head_tail
		 * is the release point for concurrent consumers. */
		n = __rte_ring_st_get_tail(&r->prod, &tail, n);
		if (n != 0)
			__rte_ring_enqueue_elems(r, tail, obj_table, esize, n);
		__rte_ring_st_set_head_tail(&r->prod, tail, n, 1);
		break;
	case RTE_RING_SYNC_MT_HTS:
		/* Same copy-then-publish sequence via the HTS producer head. */
		n = __rte_ring_hts_get_tail(&r->hts_prod, &tail, n);
		if (n != 0)
			__rte_ring_enqueue_elems(r, tail, obj_table, esize, n);
		__rte_ring_hts_set_head_tail(&r->hts_prod, tail, n, 1);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}
198 
211 __rte_experimental
212 static __rte_always_inline void
213 rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table,
214  unsigned int n)
215 {
216  rte_ring_enqueue_elem_finish(r, obj_table, sizeof(uintptr_t), n);
217 }
218 
240 __rte_experimental
241 static __rte_always_inline unsigned int
242 rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table,
243  unsigned int esize, unsigned int n, unsigned int *available)
244 {
245  return __rte_ring_do_dequeue_start(r, obj_table, esize, n,
246  RTE_RING_QUEUE_FIXED, available);
247 }
248 
266 __rte_experimental
267 static __rte_always_inline unsigned int
268 rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table,
269  unsigned int n, unsigned int *available)
270 {
271  return rte_ring_dequeue_bulk_elem_start(r, obj_table, sizeof(uintptr_t),
272  n, available);
273 }
274 
296 __rte_experimental
297 static __rte_always_inline unsigned int
298 rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table,
299  unsigned int esize, unsigned int n, unsigned int *available)
300 {
301  return __rte_ring_do_dequeue_start(r, obj_table, esize, n,
302  RTE_RING_QUEUE_VARIABLE, available);
303 }
304 
322 __rte_experimental
323 static __rte_always_inline unsigned int
324 rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table,
325  unsigned int n, unsigned int *available)
326 {
327  return rte_ring_dequeue_burst_elem_start(r, obj_table,
328  sizeof(uintptr_t), n, available);
329 }
330 
/**
 * Complete a dequeue started with rte_ring_dequeue_{bulk,burst}_elem_start():
 * release @p n slots back to producers by advancing the consumer tail.
 *
 * The objects were already copied out by the *_start() call, so no data
 * movement happens here — only the tail publish. Only ST and MT_HTS
 * consumer modes are supported; classic MT and RTS consumers assert.
 * @p n must not exceed the count returned by the matching *_start() —
 * presumably enforced inside the __rte_ring_*_get_tail helpers; TODO
 * confirm.
 *
 * @param r
 *   Ring the dequeue was started on.
 * @param n
 *   Number of objects to release (may be fewer than were peeked).
 */
__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n)
{
	uint32_t tail;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		/* Resolve the release position, then publish it (last arg 0
		 * marks the consumer side, vs 1 for the producer). */
		n = __rte_ring_st_get_tail(&r->cons, &tail, n);
		__rte_ring_st_set_head_tail(&r->cons, tail, n, 0);
		break;
	case RTE_RING_SYNC_MT_HTS:
		/* Same release sequence via the HTS consumer head. */
		n = __rte_ring_hts_get_tail(&r->hts_cons, &tail, n);
		__rte_ring_hts_set_head_tail(&r->hts_cons, tail, n, 0);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}
363 
374 __rte_experimental
375 static __rte_always_inline void
376 rte_ring_dequeue_finish(struct rte_ring *r, unsigned int n)
377 {
379 }
380 
381 #ifdef __cplusplus
382 }
383 #endif
384 
385 #endif /* _RTE_RING_PEEK_H_ */
#define __rte_always_inline
Definition: rte_common.h:226
static __rte_experimental __rte_always_inline unsigned int rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n, unsigned int *free_space)
static __rte_experimental __rte_always_inline void rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n)
static __rte_experimental __rte_always_inline unsigned int rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n, unsigned int *free_space)
Definition: rte_ring_peek.h:72
static __rte_experimental __rte_always_inline void rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table, unsigned int esize, unsigned int n)
static __rte_experimental __rte_always_inline unsigned int rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
static __rte_experimental __rte_always_inline unsigned int rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available)
static __rte_experimental __rte_always_inline unsigned int rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table, unsigned int esize, unsigned int n, unsigned int *available)
static __rte_experimental __rte_always_inline void rte_ring_dequeue_finish(struct rte_ring *r, unsigned int n)
static __rte_experimental __rte_always_inline unsigned int rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
enum rte_ring_sync_type sync_type
Definition: rte_ring_core.h:77
static __rte_experimental __rte_always_inline unsigned int rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n, unsigned int *free_space)
Definition: rte_ring_peek.h:98
static __rte_experimental __rte_always_inline unsigned int rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n, unsigned int *free_space)
static __rte_experimental __rte_always_inline void rte_ring_enqueue_finish(struct rte_ring *r, void *const *obj_table, unsigned int n)