DPDK 20.11.10
rte_ring_peek_zc.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2020 Arm Limited
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_PEEK_ZC_H_
#define _RTE_RING_PEEK_ZC_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ring_peek_c11_mem.h>

/**
 * Ring zero-copy information structure.
 *
 * This structure contains the pointers and length of the space
 * reserved on the ring storage.
 */
struct rte_ring_zc_data {
	/* Pointer to the first space in the ring */
	void *ptr1;
	/* Pointer to the second space in the ring if there is wrap-around.
	 * It contains a valid value only if wrap-around happens.
	 */
	void *ptr2;
	/* Number of elements at the first pointer. If this is equal to
	 * the number of elements requested, then ptr2 is NULL.
	 * Otherwise, subtracting n1 from the number of elements requested
	 * gives the number of elements available at ptr2.
	 */
	unsigned int n1;
} __rte_cache_aligned;

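/*
 * Illustrative sketch (added commentary, not part of the original header):
 * copying data into a reserved region described by a struct
 * rte_ring_zc_data. Here "zcd", "src", "esize" and "num" are hypothetical;
 * "num" is the element count returned by a *_start call. The second memcpy
 * covers the wrap-around segment, which exists only when ptr2 is non-NULL.
 *
 *	memcpy(zcd.ptr1, src, zcd.n1 * esize);
 *	if (zcd.ptr2 != NULL)
 *		memcpy(zcd.ptr2, (const char *)src + zcd.n1 * esize,
 *			(num - zcd.n1) * esize);
 */
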
static __rte_always_inline void
__rte_ring_get_elem_addr(struct rte_ring *r, uint32_t head,
	uint32_t esize, uint32_t num, void **dst1, uint32_t *n1, void **dst2)
{
	uint32_t idx, scale, nr_idx;
	uint32_t *ring = (uint32_t *)&r[1];

	/* Normalize to uint32_t: the ring storage is addressed in
	 * 32-bit words, so scale the element index accordingly.
	 */
	scale = esize / sizeof(uint32_t);
	idx = head & r->mask;
	nr_idx = idx * scale;

	*dst1 = ring + nr_idx;
	*n1 = num;

	/* If the requested region runs past the end of the ring storage,
	 * split it: n1 elements at dst1, the rest at the start of the ring.
	 */
	if (idx + num > r->size) {
		*n1 = r->size - idx;
		*dst2 = ring;
	} else {
		*dst2 = NULL;
	}
}
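
/*
 * Worked example (illustration only): for a ring with size = 8 (mask = 7)
 * and head = 6, a request for num = 4 elements gives idx = 6, so
 * idx + num = 10 > size and the region wraps: n1 = 8 - 6 = 2 elements are
 * addressed through dst1, and dst2 points at the start of the ring
 * storage for the remaining 2 elements.
 */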

static __rte_always_inline unsigned int
__rte_ring_do_enqueue_zc_elem_start(struct rte_ring *r, unsigned int esize,
	uint32_t n, enum rte_ring_queue_behavior behavior,
	struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	uint32_t free, head, next;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &free);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_prod_head(r, n, behavior, &head, &free);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		n = 0;
		free = 0;
		return n;
	}

	__rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
		&zcd->n1, &zcd->ptr2);

	if (free_space != NULL)
		*free_space = free - n;
	return n;
}

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
		RTE_RING_QUEUE_FIXED, zcd, free_space);
}
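
/*
 * Usage sketch (assumed caller code, not part of this header): reserve
 * space for exactly 32 pointer-sized objects (bulk semantics: all or
 * none), copy them into the ring storage, then make them visible to
 * consumers. "objs" is a hypothetical source array of at least 32
 * uintptr_t entries.
 *
 *	struct rte_ring_zc_data zcd;
 *	unsigned int n;
 *
 *	n = rte_ring_enqueue_zc_bulk_elem_start(r, sizeof(uintptr_t), 32,
 *		&zcd, NULL);
 *	if (n != 0) {
 *		memcpy(zcd.ptr1, objs, zcd.n1 * sizeof(uintptr_t));
 *		if (zcd.ptr2 != NULL)
 *			memcpy(zcd.ptr2, objs + zcd.n1,
 *				(n - zcd.n1) * sizeof(uintptr_t));
 *		rte_ring_enqueue_zc_elem_finish(r, n);
 *	}
 */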

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return rte_ring_enqueue_zc_bulk_elem_start(r, sizeof(uintptr_t), n,
		zcd, free_space);
}

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
		RTE_RING_QUEUE_VARIABLE, zcd, free_space);
}

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return rte_ring_enqueue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
		zcd, free_space);
}

__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n)
{
	uint32_t tail;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_st_get_tail(&r->prod, &tail, n);
		__rte_ring_st_set_head_tail(&r->prod, tail, n, 1);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_get_tail(&r->hts_prod, &tail, n);
		__rte_ring_hts_set_head_tail(&r->hts_prod, tail, n, 1);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}

__rte_experimental
static __rte_always_inline void
rte_ring_enqueue_zc_finish(struct rte_ring *r, unsigned int n)
{
	rte_ring_enqueue_zc_elem_finish(r, n);
}
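
/*
 * One illustrative zero-copy use (a sketch under assumptions, not a
 * definitive recipe): receive packets from a NIC directly into the ring
 * storage, avoiding an intermediate mbuf array on the stack. "portid" and
 * "queueid" are hypothetical, and the ethdev port must be configured
 * separately.
 *
 *	struct rte_ring_zc_data zcd;
 *	unsigned int n;
 *	uint16_t nb_rx;
 *
 *	n = rte_ring_enqueue_zc_burst_start(r, 32, &zcd, NULL);
 *	if (n != 0) {
 *		nb_rx = rte_eth_rx_burst(portid, queueid,
 *			(struct rte_mbuf **)zcd.ptr1, zcd.n1);
 *		if (nb_rx == zcd.n1 && n != zcd.n1)
 *			nb_rx += rte_eth_rx_burst(portid, queueid,
 *				(struct rte_mbuf **)zcd.ptr2, n - zcd.n1);
 *		rte_ring_enqueue_zc_finish(r, nb_rx);
 *	}
 */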

static __rte_always_inline unsigned int
__rte_ring_do_dequeue_zc_elem_start(struct rte_ring *r,
	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
	struct rte_ring_zc_data *zcd, unsigned int *available)
{
	uint32_t avail, head, next;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &avail);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_cons_head(r, n, behavior,
			&head, &avail);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		n = 0;
		avail = 0;
		return n;
	}

	__rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
		&zcd->n1, &zcd->ptr2);

	if (available != NULL)
		*available = avail - n;
	return n;
}

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
		RTE_RING_QUEUE_FIXED, zcd, available);
}
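
/*
 * Usage sketch (assumed caller code): dequeue exactly 32 pointer-sized
 * objects (bulk semantics: all or none) without copying them through a
 * temporary on-stack array first; data is read in place from the ring
 * storage, then the space is released with the finish call. "objs" is a
 * hypothetical destination array of at least 32 uintptr_t entries.
 *
 *	struct rte_ring_zc_data zcd;
 *	unsigned int n;
 *
 *	n = rte_ring_dequeue_zc_bulk_elem_start(r, sizeof(uintptr_t), 32,
 *		&zcd, NULL);
 *	if (n != 0) {
 *		memcpy(objs, zcd.ptr1, zcd.n1 * sizeof(uintptr_t));
 *		if (zcd.ptr2 != NULL)
 *			memcpy(objs + zcd.n1, zcd.ptr2,
 *				(n - zcd.n1) * sizeof(uintptr_t));
 *		rte_ring_dequeue_zc_finish(r, n);
 *	}
 */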

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return rte_ring_dequeue_zc_bulk_elem_start(r, sizeof(uintptr_t),
		n, zcd, available);
}

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
		RTE_RING_QUEUE_VARIABLE, zcd, available);
}

__rte_experimental
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return rte_ring_dequeue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
		zcd, available);
}

__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n)
{
	uint32_t tail;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_st_get_tail(&r->cons, &tail, n);
		__rte_ring_st_set_head_tail(&r->cons, tail, n, 0);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_get_tail(&r->hts_cons, &tail, n);
		__rte_ring_hts_set_head_tail(&r->hts_cons, tail, n, 0);
		break;
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_MT_RTS:
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}

__rte_experimental
static __rte_always_inline void
rte_ring_dequeue_zc_finish(struct rte_ring *r, unsigned int n)
{
	rte_ring_dequeue_zc_elem_finish(r, n);
}
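
/*
 * Note (added commentary, not in the original source): the "n" passed to
 * a finish call must not exceed the count returned by the matching start
 * call. In the MT_HTS sync mode, other threads on the same side of the
 * ring cannot complete their operations until finish completes, so the
 * start-to-finish window should be kept short.
 */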

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_PEEK_ZC_H_ */