#ifndef _RTE_RING_PEEK_ZC_H_
#define _RTE_RING_PEEK_ZC_H_

/*
 * Ring Peek Zero Copy APIs: split enqueue/dequeue into start / copy /
 * finish steps so data can be copied directly to/from the ring space,
 * avoiding an intermediate buffer.
 */

#include <rte_ring_peek_elem_pvt.h>	/* internal head/tail helpers */

/*
 * Compute where a reservation of num elements starting at head lives in
 * the ring: dst1/n1 describe the first contiguous region, dst2 the
 * region after wrap-around (NULL if the reservation does not wrap).
 */
static __rte_always_inline void
__rte_ring_get_elem_addr(struct rte_ring *r, uint32_t head,
	uint32_t esize, uint32_t num, void **dst1, uint32_t *n1, void **dst2)
{
	uint32_t idx, scale, nr_idx;
	uint32_t *ring = (uint32_t *)&r[1];

	/* Normalize to uint32_t-sized units. */
	scale = esize / sizeof(uint32_t);
	idx = head & r->mask;
	nr_idx = idx * scale;

	*dst1 = ring + nr_idx;
	*n1 = num;

	if (idx + num > r->size) {
		/* Reservation wraps past the ring end: split it. */
		*n1 = r->size - idx;
		*dst2 = ring;
	} else
		*dst2 = NULL;
}
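/*
 * Worked example (hypothetical numbers, not taken from the code above):
 * for a ring with size == 1024, mask == 1023 and esize == 8 (so
 * scale == 2), a request of num == 8 starting at idx == 1020 spans the
 * ring end and is split as:
 *
 *	*dst1 == &ring[1020 * 2], *n1 == 4	// slots 1020..1023
 *	*dst2 == &ring[0]			// remaining 4 slots wrap to 0
 */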
/*
 * Reserve ring space for up to n elements and report where the caller
 * may write them (zcd->ptr1/n1, plus zcd->ptr2 on wrap-around).
 */
static __rte_always_inline unsigned int
__rte_ring_do_enqueue_zc_elem_start(struct rte_ring *r, unsigned int esize,
	uint32_t n, enum rte_ring_queue_behavior behavior,
	struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	uint32_t free, head, next;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_prod_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &free);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_prod_head(r, n, behavior, &head, &free);
		break;
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		return 0;
	}

	__rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
		&zcd->n1, &zcd->ptr2);

	if (free_space != NULL)
		*free_space = free - n;
	return n;
}
/*
 * Start a zero-copy bulk enqueue: reserve exactly n slots or none.
 */
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
			RTE_RING_QUEUE_FIXED, zcd, free_space);
}
/*
 * Start a zero-copy burst enqueue: reserve up to n slots.
 */
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_zc_elem_start(r, esize, n,
			RTE_RING_QUEUE_VARIABLE, zcd, free_space);
}
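/*
 * A note on the two start variants (illustrative calls, not part of
 * this file): the bulk form is all-or-nothing, while the burst form may
 * reserve fewer slots than requested.
 *
 *	n = rte_ring_enqueue_zc_bulk_elem_start(r, 8, 32, &zcd, NULL);
 *		// n == 32 or n == 0
 *	n = rte_ring_enqueue_zc_burst_elem_start(r, 8, 32, &zcd, NULL);
 *		// any n in [0, 32]
 */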
/*
 * Complete an enqueue: publish n objects to consumers by advancing the
 * producer tail. n must not exceed the value returned by the matching
 * start call.
 */
static __rte_always_inline void
rte_ring_enqueue_zc_elem_finish(struct rte_ring *r, unsigned int n)
{
	uint32_t tail;

	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_st_get_tail(&r->prod, &tail, n);
		__rte_ring_st_set_head_tail(&r->prod, tail, n, 1);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_get_tail(&r->hts_prod, &tail, n);
		__rte_ring_hts_set_head_tail(&r->hts_prod, tail, n, 1);
		break;
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}
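/*
 * Typical enqueue sequence (a sketch; assumes r was created with a
 * single-producer or HTS producer sync mode, and objs points at least
 * n uintptr_t values; the copy step is up to the caller):
 *
 *	struct rte_ring_zc_data zcd;
 *	unsigned int n;
 *
 *	n = rte_ring_enqueue_zc_burst_elem_start(r, sizeof(uintptr_t), 32,
 *			&zcd, NULL);
 *	if (n != 0) {
 *		memcpy(zcd.ptr1, objs, zcd.n1 * sizeof(uintptr_t));
 *		if (n != zcd.n1)	// reservation wrapped around
 *			memcpy(zcd.ptr2, objs + zcd.n1,
 *				(n - zcd.n1) * sizeof(uintptr_t));
 *		rte_ring_enqueue_zc_elem_finish(r, n);
 *	}
 */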
/*
 * Reserve up to n elements for reading and report where they live in
 * the ring (zcd->ptr1/n1, plus zcd->ptr2 on wrap-around).
 */
static __rte_always_inline unsigned int
__rte_ring_do_dequeue_zc_elem_start(struct rte_ring *r, unsigned int esize,
	uint32_t n, enum rte_ring_queue_behavior behavior,
	struct rte_ring_zc_data *zcd, unsigned int *available)
{
	uint32_t avail, head, next;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_move_cons_head(r, RTE_RING_SYNC_ST, n,
			behavior, &head, &next, &avail);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_move_cons_head(r, n, behavior,
			&head, &avail);
		break;
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
		return 0;
	}

	__rte_ring_get_elem_addr(r, head, esize, n, &zcd->ptr1,
		&zcd->n1, &zcd->ptr2);

	if (available != NULL)
		*available = avail - n;
	return n;
}
/*
 * Start a zero-copy bulk dequeue: reserve exactly n elements or none.
 */
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_bulk_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
			RTE_RING_QUEUE_FIXED, zcd, available);
}
/*
 * Start a zero-copy burst dequeue: reserve up to n elements.
 */
static __rte_always_inline unsigned int
rte_ring_dequeue_zc_burst_elem_start(struct rte_ring *r, unsigned int esize,
	unsigned int n, struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return __rte_ring_do_dequeue_zc_elem_start(r, esize, n,
			RTE_RING_QUEUE_VARIABLE, zcd, available);
}
/*
 * Complete a dequeue: release n slots back to producers by advancing
 * the consumer tail. n must not exceed the value returned by the
 * matching start call.
 */
static __rte_always_inline void
rte_ring_dequeue_zc_elem_finish(struct rte_ring *r, unsigned int n)
{
	uint32_t tail;

	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_ST:
		n = __rte_ring_st_get_tail(&r->cons, &tail, n);
		__rte_ring_st_set_head_tail(&r->cons, tail, n, 0);
		break;
	case RTE_RING_SYNC_MT_HTS:
		n = __rte_ring_hts_get_tail(&r->hts_cons, &tail, n);
		__rte_ring_hts_set_head_tail(&r->hts_cons, tail, n, 0);
		break;
	default:
		/* unsupported mode, shouldn't be here */
		RTE_ASSERT(0);
	}
}
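/*
 * Typical dequeue sequence (a sketch mirroring the enqueue side;
 * assumes a single-consumer or HTS consumer ring and an objs buffer
 * large enough for n uintptr_t values):
 *
 *	struct rte_ring_zc_data zcd;
 *	unsigned int n;
 *
 *	n = rte_ring_dequeue_zc_burst_elem_start(r, sizeof(uintptr_t), 32,
 *			&zcd, NULL);
 *	if (n != 0) {
 *		memcpy(objs, zcd.ptr1, zcd.n1 * sizeof(uintptr_t));
 *		if (n != zcd.n1)	// data wrapped around the ring end
 *			memcpy(objs + zcd.n1, zcd.ptr2,
 *				(n - zcd.n1) * sizeof(uintptr_t));
 *		rte_ring_dequeue_zc_elem_finish(r, n);
 *	}
 */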
/*
 * Convenience wrappers for rings of pointer-sized elements
 * (esize == sizeof(uintptr_t)).
 */
static __rte_always_inline unsigned int
rte_ring_enqueue_zc_bulk_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return rte_ring_enqueue_zc_bulk_elem_start(r, sizeof(uintptr_t), n,
			zcd, free_space);
}

static __rte_always_inline unsigned int
rte_ring_enqueue_zc_burst_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *free_space)
{
	return rte_ring_enqueue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
			zcd, free_space);
}

static __rte_always_inline void
rte_ring_enqueue_zc_finish(struct rte_ring *r, unsigned int n)
{
	rte_ring_enqueue_zc_elem_finish(r, n);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_zc_bulk_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return rte_ring_dequeue_zc_bulk_elem_start(r, sizeof(uintptr_t), n,
			zcd, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_zc_burst_start(struct rte_ring *r, unsigned int n,
	struct rte_ring_zc_data *zcd, unsigned int *available)
{
	return rte_ring_dequeue_zc_burst_elem_start(r, sizeof(uintptr_t), n,
			zcd, available);
}

static __rte_always_inline void
rte_ring_dequeue_zc_finish(struct rte_ring *r, unsigned int n)
{
	rte_ring_dequeue_zc_elem_finish(r, n);
}
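/*
 * Example I/O pattern with the pointer-sized wrappers (a sketch adapted
 * to mbuf reception; portid and queueid are assumed to be valid ethdev
 * handles and r a ring of pointer-sized elements):
 *
 *	struct rte_ring_zc_data zcd;
 *	unsigned int n, nb_rx;
 *
 *	// Reserve up to 32 slots on the ring.
 *	n = rte_ring_enqueue_zc_burst_start(r, 32, &zcd, NULL);
 *	if (n != 0) {
 *		// Receive packets directly into the reserved slots.
 *		nb_rx = rte_eth_rx_burst(portid, queueid,
 *				(struct rte_mbuf **)zcd.ptr1, zcd.n1);
 *		if (nb_rx == zcd.n1 && n != zcd.n1)
 *			nb_rx += rte_eth_rx_burst(portid, queueid,
 *				(struct rte_mbuf **)zcd.ptr2, n - zcd.n1);
 *		// Publish only what was actually received.
 *		rte_ring_enqueue_zc_finish(r, nb_rx);
 *	}
 */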
#endif /* _RTE_RING_PEEK_ZC_H_ */