DPDK 20.08.0
rte_ring_peek.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2020 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_PEEK_H_
#define _RTE_RING_PEEK_H_

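/**
 * @file
 * It is not recommended to include this file directly.
 * Please include <rte_ring.h> instead.
 *
 * Ring Peek API
 *
 * Splitting the public enqueue/dequeue API into two phases:
 * - enqueue/dequeue start
 * - enqueue/dequeue finish
 * allows the user to inspect objects in the ring without removing them
 * (an MT-safe "peek"). As the switch statements below enforce, this API
 * is available only for two sync modes:
 * 1) Single Producer/Single Consumer (RTE_RING_SYNC_ST)
 * 2) Serialized Producer/Consumer (RTE_RING_SYNC_MT_HTS)
 * It is the user's responsibility to create/init the ring with the
 * appropriate sync mode selected. Between _start_ and _finish_ no other
 * thread can proceed with an enqueue (or dequeue) operation until
 * _finish_ completes.
 *
 * A minimal usage sketch; object_examine() and KEEP are hypothetical
 * user-side names, not part of this API:
 *
 *	// read 1 elem from the ring:
 *	n = rte_ring_dequeue_bulk_start(ring, &obj, 1, NULL);
 *	if (n != 0) {
 *		// examine the object
 *		if (object_examine(obj) == KEEP)
 *			// decided to keep it in the ring
 *			rte_ring_dequeue_finish(ring, 0);
 *		else
 *			// decided to remove it from the ring
 *			rte_ring_dequeue_finish(ring, n);
 *	}
 */
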
#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ring_peek_c11_mem.h>

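/**
 * @internal This function moves the producer head value and reserves
 * room for up to *n* objects, without storing anything yet.
 */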
static __rte_always_inline unsigned int
__rte_ring_do_enqueue_start(struct rte_ring *r, uint32_t n,
		enum rte_ring_queue_behavior behavior, uint32_t *free_space)
{
	uint32_t free, head, next;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &free);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_prod_head(r, n, behavior,
			&head, &free);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		n = 0;
		free = 0;
	}

	if (free_space != NULL)
		*free_space = free - n;
	return n;
}

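/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to enqueue several objects on the ring.
 * Note that no actual objects are put in the ring by this function;
 * it just reserves space for them. The user has to call
 * rte_ring_enqueue_elem_finish() to copy the objects into the ring and
 * complete the enqueue operation.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param n
 *   The number of objects to add in the ring.
 * @param free_space
 *   If non-NULL, returns the amount of space in the ring after the
 *   reservation has finished.
 * @return
 *   The number of objects that can be enqueued, either 0 or n.
 */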
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem_start(struct rte_ring *r, unsigned int n,
		unsigned int *free_space)
{
	return __rte_ring_do_enqueue_start(r, n, RTE_RING_QUEUE_FIXED,
			free_space);
}

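/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to enqueue several pointer-sized objects on the ring.
 * No objects are stored by this call; the user has to call
 * rte_ring_enqueue_finish() to copy them in and complete the operation.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param n
 *   The number of objects to add in the ring.
 * @param free_space
 *   If non-NULL, returns the amount of space in the ring after the
 *   reservation has finished.
 * @return
 *   The number of objects that can be enqueued, either 0 or n.
 */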
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_start(struct rte_ring *r, unsigned int n,
		unsigned int *free_space)
{
	return rte_ring_enqueue_bulk_elem_start(r, n, free_space);
}

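/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to enqueue up to the requested number of objects on the ring
 * (burst behavior: a partial reservation is possible). No objects are
 * stored by this call; the user has to call
 * rte_ring_enqueue_elem_finish() to copy them in and complete the
 * operation.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param n
 *   The maximum number of objects to add in the ring.
 * @param free_space
 *   If non-NULL, returns the amount of space in the ring after the
 *   reservation has finished.
 * @return
 *   The actual number of objects that can be enqueued.
 */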
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem_start(struct rte_ring *r, unsigned int n,
		unsigned int *free_space)
{
	return __rte_ring_do_enqueue_start(r, n, RTE_RING_QUEUE_VARIABLE,
			free_space);
}

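/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to enqueue up to the requested number of pointer-sized objects
 * on the ring (burst behavior). Complete the operation with
 * rte_ring_enqueue_finish().
 *
 * @param r
 *   A pointer to the ring structure.
 * @param n
 *   The maximum number of objects to add in the ring.
 * @param free_space
 *   If non-NULL, returns the amount of space in the ring after the
 *   reservation has finished.
 * @return
 *   The actual number of objects that can be enqueued.
 */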
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_start(struct rte_ring *r, unsigned int n,
		unsigned int *free_space)
{
	return rte_ring_enqueue_burst_elem_start(r, n, free_space);
}

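/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Complete an enqueue operation started by one of the
 * rte_ring_enqueue_*_start() calls: copy the objects into the ring and
 * update the producer tail, making them visible to consumers.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects to copy into the ring.
 * @param esize
 *   The size of each ring element, in bytes. It must be the same value
 *   used while creating the ring, otherwise the results are undefined.
 * @param n
 *   The number of objects to add to the ring; must not exceed the
 *   number reserved by the corresponding _start_ call.
 */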
__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_elem_finish(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n)
{
	uint32_t tail;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_st_get_tail(&r->prod, &tail, n);
		if (n != 0)
			__rte_ring_enqueue_elems(r, tail, obj_table, esize, n);
		__rte_ring_st_set_head_tail(&r->prod, tail, n, 1);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_get_tail(&r->hts_prod, &tail, n);
		if (n != 0)
			__rte_ring_enqueue_elems(r, tail, obj_table, esize, n);
		__rte_ring_hts_set_head_tail(&r->hts_prod, tail, n, 1);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}

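/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Complete an enqueue operation started by rte_ring_enqueue_bulk_start()
 * or rte_ring_enqueue_burst_start(): copy the pointer-sized objects into
 * the ring and update the producer tail.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add to the ring; must not exceed the
 *   number reserved by the corresponding _start_ call.
 */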
__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_finish(struct rte_ring *r, void * const *obj_table,
		unsigned int n)
{
	rte_ring_enqueue_elem_finish(r, obj_table, sizeof(uintptr_t), n);
}

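/**
 * @internal This function moves the consumer head value and copies up
 * to *n* objects from the ring to the user-provided obj_table.
 */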
static __rte_always_inline unsigned int
__rte_ring_do_dequeue_start(struct rte_ring *r, void *obj_table,
	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
	uint32_t *available)
{
	uint32_t avail, head, next;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &avail);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_cons_head(r, n, behavior,
			&head, &avail);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		n = 0;
		avail = 0;
	}

	if (n != 0)
		__rte_ring_dequeue_elems(r, head, obj_table, esize, n);

	if (available != NULL)
		*available = avail - n;
	return n;
}

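/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to dequeue several objects from the ring: the objects are
 * copied into obj_table, but they are not removed from the ring until
 * rte_ring_dequeue_elem_finish() is called.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of each ring element, in bytes. It must be the same value
 *   used while creating the ring, otherwise the results are undefined.
 * @param n
 *   The number of objects to dequeue from the ring.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   The number of objects dequeued, either 0 or n.
 */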
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem_start(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_start(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, available);
}

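/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to dequeue several pointer-sized objects from the ring; they
 * stay in the ring until rte_ring_dequeue_finish() is called.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers that will be filled.
 * @param n
 *   The number of objects to dequeue from the ring.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   The number of objects dequeued, either 0 or n.
 */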
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_start(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return rte_ring_dequeue_bulk_elem_start(r, obj_table, sizeof(uintptr_t),
		n, available);
}

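/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to dequeue up to the requested number of objects from the ring
 * (burst behavior: a partial dequeue is possible). The objects are
 * copied into obj_table but remain in the ring until
 * rte_ring_dequeue_elem_finish() is called.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of each ring element, in bytes. It must be the same value
 *   used while creating the ring, otherwise the results are undefined.
 * @param n
 *   The maximum number of objects to dequeue from the ring.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   The actual number of objects dequeued.
 */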
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem_start(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_start(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, available);
}

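/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Start to dequeue up to the requested number of pointer-sized objects
 * from the ring (burst behavior); complete the operation with
 * rte_ring_dequeue_finish().
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers that will be filled.
 * @param n
 *   The maximum number of objects to dequeue from the ring.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   The actual number of objects dequeued.
 */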
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_start(struct rte_ring *r, void **obj_table,
		unsigned int n, unsigned int *available)
{
	return rte_ring_dequeue_burst_elem_start(r, obj_table,
		sizeof(uintptr_t), n, available);
}

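/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Complete a dequeue operation started by one of the
 * rte_ring_dequeue_*_start() calls: update the consumer tail, actually
 * removing n objects from the ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param n
 *   The number of objects to remove from the ring; pass 0 to keep the
 *   peeked objects in the ring, or at most the number returned by the
 *   corresponding _start_ call.
 */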
__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_elem_finish(struct rte_ring *r, unsigned int n)
{
	uint32_t tail;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_st_get_tail(&r->cons, &tail, n);
		__rte_ring_st_set_head_tail(&r->cons, tail, n, 0);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_get_tail(&r->hts_cons, &tail, n);
		__rte_ring_hts_set_head_tail(&r->hts_cons, tail, n, 0);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}

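/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Complete a dequeue operation started by rte_ring_dequeue_bulk_start()
 * or rte_ring_dequeue_burst_start(): remove n objects from the ring
 * (pass 0 to keep them all).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param n
 *   The number of objects to remove from the ring.
 */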
__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_finish(struct rte_ring *r, unsigned int n)
{
	rte_ring_dequeue_elem_finish(r, n);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_PEEK_H_ */