#include <rte_compat.h>
#include <rte_config.h>
#define PKT_RX_VLAN (1ULL << 0)
#define PKT_RX_RSS_HASH (1ULL << 1)
#define PKT_RX_FDIR (1ULL << 2)
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
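/*
 * Illustrative sketch, not part of the original header, written as it would
 * appear in application code that includes rte_mbuf.h: reading the RX
 * checksum status from m->ol_flags. PKT_RX_IP_CKSUM_MASK and
 * PKT_RX_L4_CKSUM_MASK are 2-bit fields encoding four states, so the value
 * must be compared against the mask rather than tested bit by bit. The
 * function name is a placeholder.
 */
static inline int
example_rx_cksum_is_bad(const struct rte_mbuf *m)
{
	if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
		return 1; /* IP header checksum is wrong */
	if ((m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
		return 1; /* L4 (TCP/UDP/SCTP) checksum is wrong */
	return 0; /* good, unknown or not computed by hardware */
}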
#define PKT_RX_IEEE1588_PTP (1ULL << 9)
#define PKT_RX_IEEE1588_TMST (1ULL << 10)
#define PKT_RX_FDIR_ID (1ULL << 13)
#define PKT_RX_FDIR_FLX (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
#define PKT_RX_LRO (1ULL << 16)
#define PKT_RX_TIMESTAMP (1ULL << 17)
#define PKT_RX_SEC_OFFLOAD (1ULL << 18)
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
#define PKT_RX_QINQ (1ULL << 20)
#define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
#define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0
#define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
#define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
#define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))
#define PKT_TX_METADATA (1ULL << 40)
#define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)
#define PKT_TX_UDP_SEG (1ULL << 42)
#define PKT_TX_SEC_OFFLOAD (1ULL << 43)
#define PKT_TX_MACSEC (1ULL << 44)
#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
#define PKT_TX_TUNNEL_IP (0xDULL << 45)
#define PKT_TX_TUNNEL_UDP (0xEULL << 45)
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
#define PKT_TX_QINQ (1ULL << 49)
#define PKT_TX_QINQ_PKT PKT_TX_QINQ
#define PKT_TX_TCP_SEG (1ULL << 50)
#define PKT_TX_IEEE1588_TMST (1ULL << 51)
#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
#define PKT_TX_TCP_CKSUM (1ULL << 52)
#define PKT_TX_SCTP_CKSUM (2ULL << 52)
#define PKT_TX_UDP_CKSUM (3ULL << 52)
#define PKT_TX_L4_MASK (3ULL << 52)
#define PKT_TX_IP_CKSUM (1ULL << 54)
#define PKT_TX_IPV4 (1ULL << 55)
#define PKT_TX_IPV6 (1ULL << 56)
#define PKT_TX_VLAN (1ULL << 57)
#define PKT_TX_VLAN_PKT PKT_TX_VLAN
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
#define PKT_TX_OUTER_IPV4 (1ULL << 59)
#define PKT_TX_OUTER_IPV6 (1ULL << 60)
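/*
 * Illustrative sketch, not part of the original header, written as it would
 * appear in application code: requesting IPv4 and TCP checksum offload on
 * TX. PKT_TX_L4_MASK is a 2-bit field, so exactly one of the *_CKSUM values
 * may be set, and l2_len/l3_len must describe the headers so the PMD can
 * locate them. The function name is a placeholder.
 */
static inline void
example_request_tcp_cksum(struct rte_mbuf *m, uint8_t l2_len, uint8_t l3_len)
{
	m->l2_len = l2_len; /* e.g. 14 for Ethernet without VLAN */
	m->l3_len = l3_len; /* e.g. 20 for an IPv4 header without options */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}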
#define PKT_TX_OFFLOAD_MASK ( \
	PKT_TX_OUTER_IPV6 | \
	PKT_TX_OUTER_IPV4 | \
	PKT_TX_OUTER_IP_CKSUM | \
	PKT_TX_VLAN_PKT | \
	PKT_TX_IPV6 | \
	PKT_TX_IPV4 | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_L4_MASK | \
	PKT_TX_IEEE1588_TMST | \
	PKT_TX_TCP_SEG | \
	PKT_TX_QINQ_PKT | \
	PKT_TX_TUNNEL_MASK | \
	PKT_TX_MACSEC | \
	PKT_TX_SEC_OFFLOAD | \
	PKT_TX_UDP_SEG | \
	PKT_TX_OUTER_UDP_CKSUM | \
	PKT_TX_METADATA)
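/*
 * Illustrative sketch, not part of the original header, written as it would
 * appear in application code: combining one tunnel type from
 * PKT_TX_TUNNEL_MASK with the outer/inner checksum and TSO flags for a
 * VXLAN-encapsulated IPv4/TCP segment. It assumes the outer/inner header
 * lengths have already been written into m->tx_offload and that the PMD
 * advertises the matching offload capabilities; the name is a placeholder.
 */
static inline void
example_request_vxlan_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
		PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
		PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
	m->tso_segsz = mss; /* payload bytes per produced segment */
}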
#define EXT_ATTACHED_MBUF (1ULL << 61)
#define IND_ATTACHED_MBUF (1ULL << 62)
#define RTE_MBUF_PRIV_ALIGN 8
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
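/*
 * Illustrative sketch, not part of the original header, written as it would
 * appear in application init code: creating a mempool whose per-mbuf data
 * room uses the defaults above (RTE_PKTMBUF_HEADROOM + 2048 bytes). The pool
 * size and cache size are arbitrary example values.
 */
static inline struct rte_mempool *
example_create_default_pool(const char *name, int socket_id)
{
	return rte_pktmbuf_pool_create(name, 8192 /* mbufs */,
			256 /* per-lcore cache */, 0 /* priv size */,
			RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
}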
enum {
	RTE_MBUF_L2_LEN_BITS = 7,
	RTE_MBUF_L3_LEN_BITS = 9,
	RTE_MBUF_L4_LEN_BITS = 8,
	RTE_MBUF_TSO_SEGSZ_BITS = 16,
	RTE_MBUF_OUTL3_LEN_BITS = 9,
	RTE_MBUF_OUTL2_LEN_BITS = 7,
	RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
		RTE_MBUF_L2_LEN_BITS -
		RTE_MBUF_L3_LEN_BITS -
		RTE_MBUF_L4_LEN_BITS -
		RTE_MBUF_TSO_SEGSZ_BITS -
		RTE_MBUF_OUTL3_LEN_BITS -
		RTE_MBUF_OUTL2_LEN_BITS,
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	RTE_MBUF_L2_LEN_OFS =
		sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
	RTE_MBUF_OUTL3_LEN_OFS =
		RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
	RTE_MBUF_OUTL2_LEN_OFS =
		RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
	RTE_MBUF_TXOFLD_UNUSED_OFS =
		RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
#else /* RTE_LITTLE_ENDIAN */
	RTE_MBUF_L2_LEN_OFS = 0,
	RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
	RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
	RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
	RTE_MBUF_OUTL3_LEN_OFS =
		RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
	RTE_MBUF_OUTL2_LEN_OFS =
		RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
	RTE_MBUF_TXOFLD_UNUSED_OFS =
		RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
#endif /* RTE_BYTE_ORDER */
};
	MARKER rx_descriptor_fields1;
	MARKER cacheline1 __rte_cache_min_aligned;
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
#if RTE_CACHE_LINE_SIZE == 64
	return mb->buf_iova + mb->data_off;
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
#ifdef ALLOW_EXPERIMENTAL_API
#ifdef ALLOW_EXPERIMENTAL_API
#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
#define RTE_MBUF_DIRECT(mb) \
	(!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
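/*
 * Illustrative sketch, not part of the original header: classifying an mbuf
 * with the three macros above. Note that RTE_MBUF_DIRECT() is not simply the
 * negation of RTE_MBUF_CLONED(); a direct mbuf is one that is neither an
 * indirect clone nor attached to an external buffer.
 */
static inline const char *
example_mbuf_kind(const struct rte_mbuf *m)
{
	if (RTE_MBUF_HAS_EXTBUF(m))
		return "attached to an external buffer";
	if (RTE_MBUF_CLONED(m))
		return "indirect (clone of another mbuf)";
	/* neither flag set: RTE_MBUF_DIRECT(m) is true */
	return "direct";
}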
#ifdef RTE_LIBRTE_MBUF_DEBUG
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
#else
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
#endif /* RTE_LIBRTE_MBUF_DEBUG */
#ifdef RTE_MBUF_REFCNT_ATOMIC
static inline uint16_t
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
static inline uint16_t
	return (uint16_t)value;
	return __rte_mbuf_refcnt_update(m, value);
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
static inline uint16_t
	return __rte_mbuf_refcnt_update(m, value);
static inline uint16_t
static inline uint16_t
static inline uint16_t
	return (uint16_t)value;
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
	if ((m) != NULL) \
		rte_prefetch0(m); \
} while (0)
	const char **reason);
#define MBUF_RAW_ALLOC_CHECK(m) do { \
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
	RTE_ASSERT((m)->next == NULL); \
	RTE_ASSERT((m)->nb_segs == 1); \
	__rte_mbuf_sanity_check(m, 0); \
} while (0)
	MBUF_RAW_ALLOC_CHECK(m);
	RTE_ASSERT(m->next == NULL);
		void *m, unsigned i);
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
static inline uint16_t
static inline uint16_t
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
					(uint16_t)m->buf_len);
#define MBUF_INVALID_PORT UINT16_MAX
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
	rte_pktmbuf_reset(m);
		struct rte_mbuf **mbufs, unsigned count)
	switch (count % 4) {
	while (idx != count) {
		MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
		rte_pktmbuf_reset(mbufs[idx]);
		MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
		rte_pktmbuf_reset(mbufs[idx]);
		MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
		rte_pktmbuf_reset(mbufs[idx]);
		MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
		rte_pktmbuf_reset(mbufs[idx]);
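/*
 * Illustrative usage sketch, not part of the original header:
 * rte_pktmbuf_alloc_bulk() is all-or-nothing, so on failure no mbuf has been
 * taken from the pool and nothing needs to be freed. The function name is a
 * placeholder for application code.
 */
static inline int
example_alloc_burst(struct rte_mempool *mp, struct rte_mbuf **pkts,
		unsigned int n)
{
	if (rte_pktmbuf_alloc_bulk(mp, pkts, n) != 0)
		return -1; /* pool exhausted: no mbufs were allocated */
	return (int)n; /* pkts[0..n-1] are reset and ready to use */
}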
	if (addr <= buf_addr)
	RTE_ASSERT(shinfo->free_cb != NULL);
	m->buf_iova = buf_iova;
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
	mi->buf_iova = m->buf_iova;
	mi->data_off = m->data_off;
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
	RTE_ASSERT(m->shinfo != NULL);
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
	uint32_t mbuf_size, buf_len;
		__rte_pktmbuf_free_extbuf(m);
		__rte_pktmbuf_free_direct(m);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	if (m->next != NULL) {
	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
		if (m->next != NULL) {
	} while ((md = md->next) != NULL &&
	} while ((m = m->next) != NULL);
	while (m->next != NULL)
#define rte_pktmbuf_mtod_offset(m, t, o) \
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
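/*
 * Illustrative sketch, not part of the original header: the mtod macros only
 * cover data that is contiguous in the first segment. It assumes m->l2_len
 * has already been set by the application; the function name is a
 * placeholder.
 */
static inline void *
example_l3_header(struct rte_mbuf *m)
{
	/* rte_pktmbuf_mtod(m, char *) would return the start of the frame;
	 * here we offset past the L2 header within the same segment. */
	return rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
}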
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
#define rte_pktmbuf_data_len(m) ((m)->data_len)
	m->data_off = (uint16_t)(m->data_off - len);
	return (char *)m->buf_addr + m->data_off;
	return (char *)tail;
	m->data_off = (uint16_t)(m->data_off + len);
	return (char *)m->buf_addr + m->data_off;
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);
	uint32_t off, uint32_t len, void *buf)
	return __rte_pktmbuf_read(m, off, len, buf);
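/*
 * Illustrative sketch, not part of the original header: rte_pktmbuf_read()
 * returns a pointer directly into the mbuf when the requested range lies in
 * one segment, and copies into the caller's buffer only when the range spans
 * segments, so the scratch buffer must be at least len bytes and must
 * outlive any use of the returned pointer. Names are placeholders.
 */
static inline int
example_peek_bytes(const struct rte_mbuf *m, uint32_t off, uint32_t len,
		void *scratch, const void **out)
{
	const void *p = rte_pktmbuf_read(m, off, len, scratch);

	if (p == NULL)
		return -1; /* off + len is beyond rte_pktmbuf_pkt_len(m) */
	*out = p;
	return 0;
}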
	cur_tail->next = tail;
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2, uint64_t unused)
	return il2 << RTE_MBUF_L2_LEN_OFS |
		il3 << RTE_MBUF_L3_LEN_OFS |
		il4 << RTE_MBUF_L4_LEN_OFS |
		tso << RTE_MBUF_TSO_SEGSZ_OFS |
		ol3 << RTE_MBUF_OUTL3_LEN_OFS |
		ol2 << RTE_MBUF_OUTL2_LEN_OFS |
		unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
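/*
 * Illustrative sketch, not part of the original header: filling the whole
 * tx_offload field in a single store with rte_mbuf_tx_offload() instead of
 * assigning each bit-field separately. It assumes an untagged Ethernet
 * header and IPv4/TCP headers without options; the name is a placeholder.
 */
static inline void
example_fill_tx_offload(struct rte_mbuf *m, uint16_t tso_segsz)
{
	m->tx_offload = rte_mbuf_tx_offload(14 /* l2 */, 20 /* l3 */,
			20 /* l4 */, tso_segsz, 0 /* outer l3 */,
			0 /* outer l2 */, 0 /* unused */);
}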
		!(ol_flags & PKT_TX_IP_CKSUM)))
	size_t seg_len, copy_len;
static inline uint32_t
	return m->hash.sched.queue_id;
static inline uint8_t
	return m->hash.sched.traffic_class;
static inline uint8_t
	return m->hash.sched.color;
	uint8_t *traffic_class,
	*color = sched.color;
	m->hash.sched.color = color;
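/*
 * Illustrative sketch, not part of the original header: the hash.sched
 * accessors shown above used in round-trip form. Queue 4, traffic class 1
 * and color 0 are arbitrary example values; the name is a placeholder.
 */
static inline void
example_sched_roundtrip(struct rte_mbuf *m)
{
	uint32_t queue_id;
	uint8_t traffic_class, color;

	rte_mbuf_sched_set(m, 4, 1, 0);
	rte_mbuf_sched_get(m, &queue_id, &traffic_class, &color);
	/* queue_id == 4, traffic_class == 1, color == 0 at this point */
}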
/* Data fields, macros, typedefs and functions declared in this file: */
struct rte_mbuf_ext_shared_info * shinfo
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
uint16_t mbuf_data_room_size
#define __rte_always_inline
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
static void rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
uint8_t inner_esp_next_proto
__extension__ typedef void * MARKER[0]
#define RTE_MBUF_DIRECT(mb)
#define IND_ATTACHED_MBUF
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
static int rte_validate_tx_offload(const struct rte_mbuf *m)
static void rte_pktmbuf_free(struct rte_mbuf *m)
static uint32_t rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
#define RTE_PTR_ALIGN_FLOOR(ptr, align)
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
rte_mbuf_extbuf_free_callback_t free_cb
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
static uint8_t rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
static __rte_experimental char * rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
__rte_experimental int rte_mbuf_check(const struct rte_mbuf *m, int is_header, const char **reason)
__extension__ typedef uint8_t MARKER8[0]
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
#define PKT_TX_OUTER_IP_CKSUM
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
static uint16_t rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value)
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
#define RTE_PTR_ADD(ptr, x)
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
static void rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id, uint8_t traffic_class, uint8_t color)
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
static void rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id, uint8_t *traffic_class, uint8_t *color)
static void rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
__extension__ typedef uint64_t MARKER64[0]
struct rte_mbuf_sched sched
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
static void rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
struct { ... } txadapter (anonymous struct inside rte_mbuf::hash)
#define RTE_MBUF_CLONED(mb)
#define __rte_mbuf_sanity_check(m, is_h)
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
RTE_STD_C11 union { ... } (anonymous, alignment-attributed union member of rte_mbuf)
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
static __rte_experimental void * rte_mbuf_to_priv(struct rte_mbuf *m)
#define rte_pktmbuf_pkt_len(m)
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
static void rte_pktmbuf_detach(struct rte_mbuf *m)
static uint16_t rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
#define rte_pktmbuf_data_len(m)
#define rte_pktmbuf_mtod(m, t)
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
#define MBUF_INVALID_PORT
#define EXT_ATTACHED_MBUF
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
#define RTE_MBUF_HAS_EXTBUF(mb)
#define __rte_cache_aligned
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
static void rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value)
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque)
struct rte_mempool * rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
struct { ... } fdir (anonymous struct inside rte_mbuf::hash)
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
#define RTE_PTR_SUB(ptr, x)
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
static __rte_experimental char * rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
static uint8_t rte_mbuf_sched_color_get(const struct rte_mbuf *m)
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
static rte_iova_t rte_mempool_virt2iova(const void *elt)
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
#define RTE_PTR_DIFF(ptr1, ptr2)
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
rte_atomic16_t refcnt_atomic
static void * rte_mempool_get_priv(struct rte_mempool *mp)
char name[RTE_MEMZONE_NAMESIZE]
rte_atomic16_t refcnt_atomic
#define rte_pktmbuf_mtod_offset(m, t, o)
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
static void rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
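/*
 * Illustrative sketch, not part of the original header, of the external
 * buffer API listed above: rte_pktmbuf_ext_shinfo_init_helper() carves the
 * rte_mbuf_ext_shared_info descriptor out of the tail of the user buffer and
 * shrinks *buf_len accordingly, after which the buffer can be attached. The
 * caller still owns buf until free_cb() releases it; names are placeholders.
 */
static inline int
example_attach_external_buffer(struct rte_mbuf *m, void *buf,
		rte_iova_t buf_iova, uint16_t buf_len,
		rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
			free_cb, fcb_opaque);
	if (shinfo == NULL)
		return -1; /* buffer too small to hold the shared info */

	rte_pktmbuf_attach_extbuf(m, buf, buf_iova, buf_len, shinfo);
	return 0;
}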