/*
 * DPDK 24.03.0 — rte_ring_peek_zc.h
 * (Doxygen "Go to the documentation of this file" page header, kept as a
 * comment so it cannot affect compilation.)
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2020 Arm Limited
4  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
5  * All rights reserved.
6  * Derived from FreeBSD's bufring.h
7  * Used as BSD-3 Licensed with permission from Kip Macy.
8  */
9 
10 #ifndef _RTE_RING_PEEK_ZC_H_
11 #define _RTE_RING_PEEK_ZC_H_
12 
70 #ifdef __cplusplus
71 extern "C" {
72 #endif
73 
74 #include <rte_ring_peek_elem_pvt.h>
75 
83  /* Pointer to the first space in the ring */
84  void *ptr1;
85  /* Pointer to the second space in the ring if there is wrap-around.
86  * It contains valid value only if wrap-around happens.
87  */
88  void *ptr2;
89  /* Number of elements in the first pointer. If this is equal to
90  * the number of elements requested, then ptr2 is NULL.
91  * Otherwise, subtracting n1 from number of elements requested
92  * will give the number of elements available at ptr2.
93  */
94  unsigned int n1;
95 };
96 
97 static __rte_always_inline void
98 __rte_ring_get_elem_addr(struct rte_ring *r, uint32_t head,
99  uint32_t esize, uint32_t num, void **dst1, uint32_t *n1, void **dst2)
100 {
101  uint32_t idx, scale, nr_idx;
102  uint32_t *ring = (uint32_t *)&r[1];
103 
104  /* Normalize to uint32_t */
105  scale = esize / sizeof(uint32_t);
106  idx = head & r->mask;
107  nr_idx = idx * scale;
108 
109  *dst1 = ring + nr_idx;
110  *n1 = num;
111 
112  if (idx + num > r->size) {
113  *n1 = r->size - idx;
114  *dst2 = ring;
115  } else {
116  *dst2 = NULL;
117  }
118 }
119 
123 static __rte_always_inline unsigned int
124 __rte_ring_do_enqueue_zc_elem_start(struct rte_ring *r, unsigned int esize,
125  uint32_t n, enum rte_ring_queue_behavior behavior,
126  struct rte_ring_zc_data *zcd, unsigned int *free_space)
127 {
128  uint32_t free, head, next;
129 
130  switch (r->prod.sync_type) {
131  case RTE_RING_SYNC_ST:
132  n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
133  behavior, &head, &next, &free);
134  break;
136  n = __rte_ring_hts_move_prod_head(r, n, behavior, &head, &free);
137  break;
138  case RTE_RING_SYNC_MT:
140  default:
141  /* unsupported mode, shouldn't be here */
142  RTE_ASSERT(0);
143  n = 0;
144  free = 0;
145  return n;
146  }
147 
148  __rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
149  &zcd->n1, &zcd->ptr2);
150 
151  if (free_space != NULL)
152  *free_space = free - n;
153  return n;
154 }
155 
179 static __rte_always_inline unsigned int
180 rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
181  unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
182 {
183  return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
184  RTE_RING_QUEUE_FIXED, zcd, free_space);
185 }
186 
209 static __rte_always_inline unsigned int
210 rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n,
211  struct rte_ring_zc_data *zcd, unsigned int *free_space)
212 {
213  return rte_ring_enqueue_zc_bulk_elem_start(r, sizeof(uintptr_t), n,
214  zcd, free_space);
215 }
216 
240 static __rte_always_inline unsigned int
241 rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
242  unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
243 {
244  return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
245  RTE_RING_QUEUE_VARIABLE, zcd, free_space);
246 }
247 
270 static __rte_always_inline unsigned int
271 rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n,
272  struct rte_ring_zc_data *zcd, unsigned int *free_space)
273 {
274  return rte_ring_enqueue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
275  zcd, free_space);
276 }
277 
288 static __rte_always_inline void
289 rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n)
290 {
291  uint32_t tail;
292 
293  switch (r->prod.sync_type) {
294  case RTE_RING_SYNC_ST:
295  n = __rte_ring_st_get_tail(&r->prod, &tail, n);
296  __rte_ring_st_set_head_tail(&r->prod, tail, n, 1);
297  break;
299  n = __rte_ring_hts_get_tail(&r->hts_prod, &tail, n);
300  __rte_ring_hts_set_head_tail(&r->hts_prod, tail, n, 1);
301  break;
302  case RTE_RING_SYNC_MT:
304  default:
305  /* unsupported mode, shouldn't be here */
306  RTE_ASSERT(0);
307  }
308 }
309 
320 static __rte_always_inline void
321 rte_ring_enqueue_zc_finish(struct rte_ring *r, unsigned int n)
322 {
324 }
325 
330 static __rte_always_inline unsigned int
331 __rte_ring_do_dequeue_zc_elem_start(struct rte_ring *r,
332  uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
333  struct rte_ring_zc_data *zcd, unsigned int *available)
334 {
335  uint32_t avail, head, next;
336 
337  switch (r->cons.sync_type) {
338  case RTE_RING_SYNC_ST:
339  n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
340  behavior, &head, &next, &avail);
341  break;
343  n = __rte_ring_hts_move_cons_head(r, n, behavior,
344  &head, &avail);
345  break;
346  case RTE_RING_SYNC_MT:
348  default:
349  /* unsupported mode, shouldn't be here */
350  RTE_ASSERT(0);
351  n = 0;
352  avail = 0;
353  return n;
354  }
355 
356  __rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
357  &zcd->n1, &zcd->ptr2);
358 
359  if (available != NULL)
360  *available = avail - n;
361  return n;
362 }
363 
386 static __rte_always_inline unsigned int
387 rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
388  unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
389 {
390  return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
391  RTE_RING_QUEUE_FIXED, zcd, available);
392 }
393 
415 static __rte_always_inline unsigned int
416 rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n,
417  struct rte_ring_zc_data *zcd, unsigned int *available)
418 {
419  return rte_ring_dequeue_zc_bulk_elem_start(r, sizeof(uintptr_t),
420  n, zcd, available);
421 }
422 
447 static __rte_always_inline unsigned int
448 rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
449  unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
450 {
451  return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
452  RTE_RING_QUEUE_VARIABLE, zcd, available);
453 }
454 
476 static __rte_always_inline unsigned int
477 rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n,
478  struct rte_ring_zc_data *zcd, unsigned int *available)
479 {
480  return rte_ring_dequeue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
481  zcd, available);
482 }
483 
494 static __rte_always_inline void
495 rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n)
496 {
497  uint32_t tail;
498 
499  switch (r->cons.sync_type) {
500  case RTE_RING_SYNC_ST:
501  n = __rte_ring_st_get_tail(&r->cons, &tail, n);
502  __rte_ring_st_set_head_tail(&r->cons, tail, n, 0);
503  break;
505  n = __rte_ring_hts_get_tail(&r->hts_cons, &tail, n);
506  __rte_ring_hts_set_head_tail(&r->hts_cons, tail, n, 0);
507  break;
508  case RTE_RING_SYNC_MT:
510  default:
511  /* unsupported mode, shouldn't be here */
512  RTE_ASSERT(0);
513  }
514 }
515 
526 static __rte_always_inline void
527 rte_ring_dequeue_zc_finish(struct rte_ring *r, unsigned int n)
528 {
530 }
531 
532 #ifdef __cplusplus
533 }
534 #endif
535 
536 #endif /* _RTE_RING_PEEK_ZC_H_ */
/*
 * Doxygen cross-reference index residue (generated), preserved as a
 * comment so the stray "#define __rte_always_inline" cannot redefine the
 * real macro:
 *
 *   #define __rte_always_inline                     rte_common.h:355
 *   #define __rte_cache_aligned                     rte_common.h:553
 *   enum rte_ring_queue_behavior                    rte_ring_core.h:44
 *   struct rte_ring members: uint32_t size, uint32_t mask
 *
 * Functions declared in this file:
 *   rte_ring_enqueue_zc_bulk_elem_start, rte_ring_enqueue_zc_bulk_start,
 *   rte_ring_enqueue_zc_burst_elem_start, rte_ring_enqueue_zc_burst_start,
 *   rte_ring_enqueue_zc_elem_finish, rte_ring_enqueue_zc_finish,
 *   rte_ring_dequeue_zc_bulk_elem_start, rte_ring_dequeue_zc_bulk_start,
 *   rte_ring_dequeue_zc_burst_elem_start, rte_ring_dequeue_zc_burst_start,
 *   rte_ring_dequeue_zc_elem_finish, rte_ring_dequeue_zc_finish
 * Referenced: rte_ring_dequeue_elem_finish
 */