DPDK 21.02.0
rte_ring_elem.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ring_core.h>
#include <rte_ring_elem_pvt.h>
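
/*
 * Calculate the memory size needed for a ring that stores elements of
 * esize bytes. esize must be a multiple of 4 and count must be a power
 * of two; a negative errno value is returned on invalid arguments.
 */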
ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
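
/*
 * Create a new ring named *name* in memory on the given NUMA socket that
 * stores fixed-size elements of esize bytes (esize must be a multiple of 4).
 * Returns the ring handle, or NULL on error (rte_errno is set).
 */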
struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
                unsigned int count, int socket_id, unsigned int flags);

/*
 * Enqueue exactly n objects of esize bytes on a ring (multi-producer safe).
 * Returns n if all objects were enqueued, 0 otherwise.
 */
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *free_space)
{
        return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
}

/*
 * Enqueue exactly n objects of esize bytes on a ring (single-producer only,
 * not safe for concurrent producers).
 */
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *free_space)
{
        return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
}

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif

/*
 * Enqueue exactly n objects on a ring, dispatching on the producer
 * synchronization type selected at ring creation time.
 */
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *free_space)
{
        switch (r->prod.sync_type) {
        case RTE_RING_SYNC_MT:
                return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
                                free_space);
        case RTE_RING_SYNC_ST:
                return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
                                free_space);
#ifdef ALLOW_EXPERIMENTAL_API
        case RTE_RING_SYNC_MT_RTS:
                return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
                                free_space);
        case RTE_RING_SYNC_MT_HTS:
                return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
                                free_space);
#endif
        }

        /* valid ring should never reach this point */
        RTE_ASSERT(0);
        if (free_space != NULL)
                *free_space = 0;
        return 0;
}

/* Enqueue one object (multi-producer safe); 0 on success, -ENOBUFS if full. */
static __rte_always_inline int
rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
        return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
                        -ENOBUFS;
}

/* Enqueue one object (single-producer); 0 on success, -ENOBUFS if full. */
static __rte_always_inline int
rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
        return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
                        -ENOBUFS;
}

/* Enqueue one object using the ring's default producer sync type. */
static __rte_always_inline int
rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
        return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
                        -ENOBUFS;
}

/*
 * Dequeue exactly n objects of esize bytes from a ring (multi-consumer safe).
 * Returns n if all objects were dequeued, 0 otherwise.
 */
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *available)
{
        return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
}

/*
 * Dequeue exactly n objects of esize bytes from a ring (single-consumer only,
 * not safe for concurrent consumers).
 */
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *available)
{
        return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
}

/*
 * Dequeue exactly n objects from a ring, dispatching on the consumer
 * synchronization type selected at ring creation time.
 */
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *available)
{
        switch (r->cons.sync_type) {
        case RTE_RING_SYNC_MT:
                return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
                                available);
        case RTE_RING_SYNC_ST:
                return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
                                available);
#ifdef ALLOW_EXPERIMENTAL_API
        case RTE_RING_SYNC_MT_RTS:
                return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
                                n, available);
        case RTE_RING_SYNC_MT_HTS:
                return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
                                n, available);
#endif
        }

        /* valid ring should never reach this point */
        RTE_ASSERT(0);
        if (available != NULL)
                *available = 0;
        return 0;
}

/* Dequeue one object (multi-consumer safe); 0 on success, -ENOENT if empty. */
static __rte_always_inline int
rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
                unsigned int esize)
{
        return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
                        -ENOENT;
}

/* Dequeue one object (single-consumer); 0 on success, -ENOENT if empty. */
static __rte_always_inline int
rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
                unsigned int esize)
{
        return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
                        -ENOENT;
}

/* Dequeue one object using the ring's default consumer sync type. */
static __rte_always_inline int
rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
{
        return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
                        -ENOENT;
}

/*
 * Enqueue up to n objects of esize bytes on a ring (multi-producer safe).
 * Returns the number of objects actually enqueued.
 */
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *free_space)
{
        return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
}

/*
 * Enqueue up to n objects of esize bytes on a ring (single-producer only).
 * Returns the number of objects actually enqueued.
 */
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *free_space)
{
        return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
}

/*
 * Enqueue up to n objects on a ring, dispatching on the producer
 * synchronization type selected at ring creation time.
 */
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *free_space)
{
        switch (r->prod.sync_type) {
        case RTE_RING_SYNC_MT:
                return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
                                free_space);
        case RTE_RING_SYNC_ST:
                return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
                                free_space);
#ifdef ALLOW_EXPERIMENTAL_API
        case RTE_RING_SYNC_MT_RTS:
                return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
                                n, free_space);
        case RTE_RING_SYNC_MT_HTS:
                return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
                                n, free_space);
#endif
        }

        /* valid ring should never reach this point */
        RTE_ASSERT(0);
        if (free_space != NULL)
                *free_space = 0;
        return 0;
}

/*
 * Dequeue up to n objects of esize bytes from a ring (multi-consumer safe).
 * Returns the number of objects actually dequeued.
 */
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *available)
{
        return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
}

/*
 * Dequeue up to n objects of esize bytes from a ring (single-consumer only).
 * Returns the number of objects actually dequeued.
 */
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *available)
{
        return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
                        RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
}

/*
 * Dequeue up to n objects from a ring, dispatching on the consumer
 * synchronization type selected at ring creation time.
 */
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
                unsigned int esize, unsigned int n, unsigned int *available)
{
        switch (r->cons.sync_type) {
        case RTE_RING_SYNC_MT:
                return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
                                available);
        case RTE_RING_SYNC_ST:
                return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
                                available);
#ifdef ALLOW_EXPERIMENTAL_API
        case RTE_RING_SYNC_MT_RTS:
                return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
                                n, available);
        case RTE_RING_SYNC_MT_HTS:
                return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
                                n, available);
#endif
        }

        /* valid ring should never reach this point */
        RTE_ASSERT(0);
        if (available != NULL)
                *available = 0;
        return 0;
}

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_peek.h>
#include <rte_ring_peek_zc.h>
#endif

#include <rte_ring.h>

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_ELEM_H_ */
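
Usage sketch (not part of the header): the example below creates a single-producer/single-consumer ring of fixed-size elements and moves one element through it. The ring name "ev_ring", the struct event layout, and the count of 1024 are illustrative assumptions; EAL arguments come from the command line.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ring_elem.h>

/* 16-byte element; esize passed to the *_elem API must be a multiple of 4. */
struct event {
        uint64_t id;
        uint32_t type;
        uint32_t len;
};

int
main(int argc, char **argv)
{
        if (rte_eal_init(argc, argv) < 0)
                return -1;

        /* SP/SC ring holding 1024 elements of sizeof(struct event) bytes. */
        struct rte_ring *r = rte_ring_create_elem("ev_ring",
                        sizeof(struct event), 1024, rte_socket_id(),
                        RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (r == NULL)
                return -1;

        struct event in = { .id = 1, .type = 2, .len = 64 };
        struct event out;

        /* Copies the whole element into the ring by value. */
        if (rte_ring_enqueue_elem(r, &in, sizeof(in)) != 0)
                printf("ring full\n");

        /* Copies one element out of the ring into 'out'. */
        if (rte_ring_dequeue_elem(r, &out, sizeof(out)) == 0)
                printf("dequeued id=%" PRIu64 "\n", out.id);

        rte_ring_free(r);
        rte_eal_cleanup();
        return 0;
}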