#ifndef _RTE_RING_RTS_H_
#define _RTE_RING_RTS_H_

/* @internal Enqueue several objects on the RTS ring. */
static __rte_always_inline unsigned int
__rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
	uint32_t *free_space)
{
	uint32_t free, head;

	n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free);

	if (n != 0) {
		__rte_ring_enqueue_elems(r, head, obj_table, esize, n);
		__rte_ring_rts_update_tail(&r->rts_prod);
	}

	if (free_space != NULL)
		*free_space = free - n;
	return n;
}
/* @internal Dequeue several objects from the RTS ring. */
static __rte_always_inline unsigned int
__rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
	uint32_t esize, uint32_t n, enum rte_ring_queue_behavior behavior,
	uint32_t *available)
{
	uint32_t entries, head;

	n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &entries);

	if (n != 0) {
		__rte_ring_dequeue_elems(r, head, obj_table, esize, n);
		__rte_ring_rts_update_tail(&r->rts_cons);
	}

	if (available != NULL)
		*available = entries - n;
	return n;
}
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_rts_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, free_space);
}
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_rts_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, available);
}
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_rts_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, free_space);
}
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
	unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_rts_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, available);
}
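/*
 * Usage sketch for the fixed-size element API: enqueue and dequeue 8-byte
 * elements on a ring created with the RTS sync flags. This is an
 * illustration only; the ring name, sizes and burst length below are
 * arbitrary, and error handling is omitted.
 *
 *	struct rte_ring *ring = rte_ring_create_elem("rts_elem_ring",
 *			sizeof(uint64_t), 1024, rte_socket_id(),
 *			RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
 *	uint64_t in[32], out[32];
 *	unsigned int done;
 *
 *	done = rte_ring_mp_rts_enqueue_burst_elem(ring, in,
 *			sizeof(uint64_t), RTE_DIM(in), NULL);
 *	done = rte_ring_mc_rts_dequeue_burst_elem(ring, out,
 *			sizeof(uint64_t), RTE_DIM(out), NULL);
 */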
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
	unsigned int n, unsigned int *free_space)
{
	return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table,
			sizeof(uintptr_t), n, free_space);
}
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_bulk(struct rte_ring *r, void **obj_table,
	unsigned int n, unsigned int *available)
{
	return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table,
			sizeof(uintptr_t), n, available);
}
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mp_rts_enqueue_burst(struct rte_ring *r, void * const *obj_table,
	unsigned int n, unsigned int *free_space)
{
	return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table,
			sizeof(uintptr_t), n, free_space);
}
__rte_experimental
static __rte_always_inline unsigned int
rte_ring_mc_rts_dequeue_burst(struct rte_ring *r, void **obj_table,
	unsigned int n, unsigned int *available)
{
	return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table,
			sizeof(uintptr_t), n, available);
}
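/*
 * Usage sketch for the pointer-object API: a multi-producer/multi-consumer
 * ring of object pointers using the RTS sync modes. Names and sizes below
 * are illustrative, and error handling is omitted.
 *
 *	struct rte_ring *ring = rte_ring_create("rts_ring", 1024,
 *			rte_socket_id(),
 *			RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
 *	void *objs[32];
 *	unsigned int free_space, avail, done;
 *
 *	done = rte_ring_mp_rts_enqueue_burst(ring, objs, RTE_DIM(objs),
 *			&free_space);
 *	done = rte_ring_mc_rts_dequeue_burst(ring, objs, RTE_DIM(objs),
 *			&avail);
 */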
/* Return the producer's max allowed head-tail distance (HTD). */
__rte_experimental
static inline uint32_t
rte_ring_get_prod_htd_max(const struct rte_ring *r)
{
	if (r->prod.sync_type == RTE_RING_SYNC_MT_RTS)
		return r->rts_prod.htd_max;
	return UINT32_MAX;
}
/* Set the producer's max allowed head-tail distance (HTD). */
__rte_experimental
static inline int
rte_ring_set_prod_htd_max(struct rte_ring *r, uint32_t v)
{
	if (r->prod.sync_type != RTE_RING_SYNC_MT_RTS)
		return -ENOTSUP;

	r->rts_prod.htd_max = v;
	return 0;
}
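/*
 * Tuning sketch: query and cap the producer head-tail distance on an RTS
 * ring. The limit of 8 below is an arbitrary example value; the setter
 * returns -ENOTSUP if the ring's producer does not use the RTS sync mode.
 *
 *	uint32_t htd = rte_ring_get_prod_htd_max(ring);
 *	if (htd > 8)
 *		(void)rte_ring_set_prod_htd_max(ring, 8);
 */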
/* Return the consumer's max allowed head-tail distance (HTD). */
__rte_experimental
static inline uint32_t
rte_ring_get_cons_htd_max(const struct rte_ring *r)
{
	if (r->cons.sync_type == RTE_RING_SYNC_MT_RTS)
		return r->rts_cons.htd_max;
	return UINT32_MAX;
}
/* Set the consumer's max allowed head-tail distance (HTD). */
__rte_experimental
static inline int
rte_ring_set_cons_htd_max(struct rte_ring *r, uint32_t v)
{
	if (r->cons.sync_type != RTE_RING_SYNC_MT_RTS)
		return -ENOTSUP;

	r->rts_cons.htd_max = v;
	return 0;
}

#endif /* _RTE_RING_RTS_H_ */