DPDK  24.11.0-rc1
rte_ring_elem.h
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

#include <rte_ring_core.h>
#include <rte_ring_elem_pvt.h>

/* Calculate the memory size needed for a ring holding count elements of esize bytes. */
ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);

/* Create a new ring named *name* that stores elements of size esize in memory. */
struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
		unsigned int count, int socket_id, unsigned int flags);

/* Enqueue a fixed number of objects on a ring (multi-producer safe). */
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
}

/* Enqueue a fixed number of objects on a ring (NOT multi-producer safe). */
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
}

#include <rte_ring_hts.h>
#include <rte_ring_rts.h>

/* Enqueue a fixed number of objects on a ring, dispatching on the producer sync type. */
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

/* Enqueue one object on a ring (multi-producer safe): 0 on success, -ENOBUFS if the ring is full. */
static __rte_always_inline int
rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

/* Enqueue one object on a ring (NOT multi-producer safe): 0 on success, -ENOBUFS if the ring is full. */
static __rte_always_inline int
rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

/* Enqueue one object on a ring, dispatching on the producer sync type. */
static __rte_always_inline int
rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

/* Dequeue a fixed number of objects from a ring (multi-consumer safe). */
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
}

/* Dequeue a fixed number of objects from a ring (NOT multi-consumer safe). */
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
}

/* Dequeue a fixed number of objects from a ring, dispatching on the consumer sync type. */
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

/* Dequeue one object from a ring (multi-consumer safe): 0 on success, -ENOENT if the ring is empty. */
static __rte_always_inline int
rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

/* Dequeue one object from a ring (NOT multi-consumer safe): 0 on success, -ENOENT if the ring is empty. */
static __rte_always_inline int
rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

/* Dequeue one object from a ring, dispatching on the consumer sync type. */
static __rte_always_inline int
rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
{
	return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

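/*
 * Illustrative sketch of the single-element calls above: they return 0 on
 * success, -ENOBUFS when the ring is full on enqueue and -ENOENT when it is
 * empty on dequeue. The element type below is hypothetical and assumes the
 * ring was created with esize == sizeof(struct my_event):
 *
 *	struct my_event {			// hypothetical 16-byte element
 *		uint64_t id;
 *		uint64_t payload;
 *	};
 *
 *	struct my_event ev = { .id = 1, .payload = 42 };
 *	if (rte_ring_enqueue_elem(r, &ev, sizeof(ev)) != 0)
 *		return -ENOBUFS;		// ring full, nothing enqueued
 *
 *	struct my_event out;
 *	if (rte_ring_dequeue_elem(r, &out, sizeof(out)) != 0)
 *		return -ENOENT;			// ring empty, nothing dequeued
 */
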
/* Enqueue up to n objects on a ring (multi-producer safe); returns the number actually enqueued. */
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
}

/* Enqueue up to n objects on a ring (NOT multi-producer safe); returns the number actually enqueued. */
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
}

/* Enqueue up to n objects on a ring, dispatching on the producer sync type. */
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

/* Dequeue up to n objects from a ring (multi-consumer safe); returns the number actually dequeued. */
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
}

/* Dequeue up to n objects from a ring (NOT multi-consumer safe); returns the number actually dequeued. */
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
}

/* Dequeue up to n objects from a ring, dispatching on the consumer sync type. */
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

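/*
 * Illustrative sketch of the burst calls: unlike the bulk variants
 * (RTE_RING_QUEUE_FIXED, all-or-nothing), the burst variants
 * (RTE_RING_QUEUE_VARIABLE) move as many elements as possible and return
 * that count. The element type, BURST_SZ and process() below are
 * hypothetical:
 *
 *	struct my_event burst[BURST_SZ];
 *	unsigned int nb, i;
 *
 *	nb = rte_ring_dequeue_burst_elem(r, burst, sizeof(burst[0]),
 *			BURST_SZ, NULL);
 *	for (i = 0; i < nb; i++)
 *		process(&burst[i]);
 */
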
#include <rte_ring_peek.h>
#include <rte_ring_peek_zc.h>

#include <rte_ring.h>

#endif /* _RTE_RING_ELEM_H_ */
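
A minimal usage sketch built from the declarations above. It assumes the element size is a multiple of 4 and the ring size a power of two with default flags (multi-producer/multi-consumer); the names my_event, make_event_ring and send_events are illustrative, not part of the DPDK API.

#include <errno.h>
#include <stdint.h>

#include <rte_lcore.h>
#include <rte_ring_elem.h>

struct my_event {		/* hypothetical 16-byte element */
	uint64_t id;
	uint64_t payload;
};

static struct rte_ring *
make_event_ring(void)
{
	/* 1024 slots on the caller's NUMA socket; flags 0 -> default MP/MC ring */
	return rte_ring_create_elem("my_event_ring", sizeof(struct my_event),
			1024, rte_socket_id(), 0);
}

static int
send_events(struct rte_ring *r, const struct my_event *evs, unsigned int n)
{
	unsigned int free_space;

	/* bulk semantics: either all n elements are enqueued or none are */
	if (rte_ring_enqueue_bulk_elem(r, evs, sizeof(*evs), n,
			&free_space) == 0)
		return -ENOBUFS;	/* not enough room for all n */
	return 0;
}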