#include <rte_compat.h>
#include <rte_config.h>
#if RTE_CACHE_LINE_SIZE == 64
return mb->buf_iova + mb->data_off;
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
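
The two *_dma_addr() helpers above are deprecated wrappers around rte_mbuf_data_iova() and rte_mbuf_data_iova_default(). A minimal usage sketch follows; the descriptor type and its fields are hypothetical driver-side names, and all sketches in this file assume <rte_mbuf.h> is included, listing only additional headers.

struct my_tx_desc {                        /* hypothetical descriptor layout */
	rte_iova_t addr;
	uint16_t len;
};

static void fill_tx_desc(struct my_tx_desc *desc, struct rte_mbuf *m)
{
	desc->addr = rte_mbuf_data_iova(m);    /* buf_iova + data_off */
	desc->len = rte_pktmbuf_data_len(m);   /* bytes in this segment */
}
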
#ifdef ALLOW_EXPERIMENTAL_API
#ifdef ALLOW_EXPERIMENTAL_API
static inline uint32_t
return mbp_priv->flags;
#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)
#define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
	(rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
#ifdef RTE_LIBRTE_MBUF_DEBUG
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
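
RTE_MBUF_HAS_PINNED_EXTBUF() simply tests the pool private flag defined above. A sketch of consulting it on the free path; the surrounding function is illustrative.

static void release_mbuf(struct rte_mbuf *m)
{
	if (RTE_MBUF_HAS_PINNED_EXTBUF(m)) {
		/* external buffer is pinned to the mbuf for its whole
		 * lifetime (pool created by rte_pktmbuf_pool_create_extbuf);
		 * it must not be detached and freed separately */
	}
	rte_pktmbuf_free(m);
}
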
#ifdef RTE_MBUF_REFCNT_ATOMIC
static inline uint16_t
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
static inline uint16_t
return (uint16_t)value;
return __rte_mbuf_refcnt_update(m, value);
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
static inline uint16_t
return __rte_mbuf_refcnt_update(m, value);
static inline uint16_t
static inline uint16_t
static inline uint16_t
return (uint16_t)value;
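
These helpers back the public reference-count API. A minimal sketch of sharing one segment between two owners; the scenario is illustrative.

static void share_between_two_owners(struct rte_mbuf *m)
{
	rte_mbuf_refcnt_update(m, 1);          /* refcnt 1 -> 2 */

	/* each owner later calls rte_pktmbuf_free_seg(m); only the call
	 * that drops the count to zero returns the mbuf to its pool */
	rte_pktmbuf_free_seg(m);               /* refcnt 2 -> 1, not freed */
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	rte_pktmbuf_free_seg(m);               /* last reference, freed */
}
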
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
const char **reason);
#define MBUF_RAW_ALLOC_CHECK(m) do { \
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
	RTE_ASSERT((m)->next == NULL); \
	RTE_ASSERT((m)->nb_segs == 1); \
	__rte_mbuf_sanity_check(m, 0); \
MBUF_RAW_ALLOC_CHECK(m);
RTE_ASSERT(m->next == NULL);
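
MBUF_RAW_ALLOC_CHECK() and the assertion above guard rte_mbuf_raw_alloc()/rte_mbuf_raw_free(). A sketch of the intended pairing, assuming the caller does its own initialization:

static void raw_alloc_example(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m == NULL)
		return;
	/* rte_pktmbuf_reset() is NOT called: data_off, lengths and
	 * ol_flags are the caller's responsibility */
	rte_mbuf_raw_free(m);   /* only legal while refcnt==1, next==NULL,
				 * nb_segs==1 */
}
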
void *m, unsigned i);
unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
unsigned int ext_num);
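
A sketch of creating a packet mbuf pool with the constructors whose parameter lists appear above; the pool name, size and cache size are arbitrary example values.

#include <rte_lcore.h>

static struct rte_mempool *create_pkt_pool(void)
{
	/* 8192 mbufs, 256-entry per-lcore cache, no per-mbuf private area,
	 * default data room (RTE_PKTMBUF_HEADROOM + 2048 bytes) */
	return rte_pktmbuf_pool_create("pkt_pool_0", 8192, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
}
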
static inline uint16_t
static inline uint16_t
m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
#define MBUF_INVALID_PORT UINT16_MAX
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
rte_pktmbuf_reset(m);
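
A sketch of allocating a single mbuf and inspecting the room left by rte_pktmbuf_reset_headroom(); the exact values depend on the pool's data room size.

static void alloc_and_inspect(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
	uint16_t head, tail;

	if (m == NULL)
		return;
	/* freshly reset mbuf: data_len == 0, headroom is
	 * RTE_MIN(RTE_PKTMBUF_HEADROOM, buf_len) */
	head = rte_pktmbuf_headroom(m);
	tail = rte_pktmbuf_tailroom(m);
	RTE_SET_USED(head);
	RTE_SET_USED(tail);
	rte_pktmbuf_free(m);
}
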
struct rte_mbuf **mbufs, unsigned count)
while (idx != count) {
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
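
A sketch of the bulk allocation path that the unrolled loop above implements; the burst size is an example value.

#define EXAMPLE_BURST 32

static void alloc_burst(struct rte_mempool *mp)
{
	struct rte_mbuf *pkts[EXAMPLE_BURST];
	unsigned int i;

	/* all-or-nothing: 0 on success, negative on failure */
	if (rte_pktmbuf_alloc_bulk(mp, pkts, EXAMPLE_BURST) != 0)
		return;
	/* ... fill and transmit ... */
	for (i = 0; i < EXAMPLE_BURST; i++)
		rte_pktmbuf_free(pkts[i]);
}
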
if (addr <= buf_addr)
RTE_ASSERT(shinfo->free_cb != NULL);
m->buf_iova = buf_iova;
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
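
A sketch of attaching an externally allocated buffer with rte_pktmbuf_ext_shinfo_init_helper() and rte_pktmbuf_attach_extbuf(); the rte_malloc-backed buffer and the callback are illustrative, any DMA-capable memory with a matching free callback works.

#include <rte_malloc.h>

static void ext_buf_free_cb(void *addr, void *opaque)   /* illustrative */
{
	RTE_SET_USED(opaque);
	rte_free(addr);
}

static int attach_external(struct rte_mbuf *m)
{
	uint16_t blen = 2048;                /* example buffer size */
	void *ebuf = rte_malloc(NULL, blen, RTE_CACHE_LINE_SIZE);
	struct rte_mbuf_ext_shared_info *shinfo;

	if (ebuf == NULL)
		return -1;
	/* the helper carves the shared info from the buffer tail and
	 * shrinks blen accordingly */
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(ebuf, &blen,
			ext_buf_free_cb, NULL);
	if (shinfo == NULL) {
		rte_free(ebuf);
		return -1;
	}
	rte_pktmbuf_attach_extbuf(m, ebuf, rte_malloc_virt2iova(ebuf),
			blen, shinfo);
	rte_pktmbuf_reset_headroom(m);
	return 0;
}
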
mdst->hash = msrc->hash;
__rte_pktmbuf_copy_hdr(mi, m);
mi->data_off = m->data_off;
mi->buf_iova = m->buf_iova;
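
The header copy and offset propagation above back rte_pktmbuf_attach() and rte_pktmbuf_clone(). A sketch of zero-copy cloning; the dedicated indirect pool is an assumption of the example.

static struct rte_mbuf *zero_copy_clone(struct rte_mbuf *md,
		struct rte_mempool *indirect_pool)
{
	/* the clone shares md's data buffer and takes a reference on it;
	 * freeing the clone later detaches it and drops that reference */
	return rte_pktmbuf_clone(md, indirect_pool);   /* NULL on failure */
}
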
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
RTE_ASSERT(m->shinfo != NULL);
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
uint32_t mbuf_size, buf_len;
__rte_pktmbuf_free_extbuf(m);
__rte_pktmbuf_free_direct(m);
mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
m->buf_addr = (char *)m + mbuf_size;
m->buf_len = (uint16_t)buf_len;
static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
__rte_pktmbuf_pinned_extbuf_decref(m))
if (m->next != NULL) {
} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
__rte_pktmbuf_pinned_extbuf_decref(m))
if (m->next != NULL) {
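
A sketch of the usual transmit-completion pattern built on rte_pktmbuf_prefree_seg(), whose reference-drop logic is shown above; the function name is illustrative.

static void tx_complete_one(struct rte_mbuf *m)
{
	/* returns the segment if this was the last reference (detaching
	 * it first if needed), otherwise NULL */
	struct rte_mbuf *seg = rte_pktmbuf_prefree_seg(m);

	if (seg != NULL)
		rte_mbuf_raw_free(seg);
}
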
uint32_t offset, uint32_t length);
} while ((m = m->next) != NULL);
while (m->next != NULL)
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
#define rte_pktmbuf_data_len(m) ((m)->data_len)
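
A sketch contrasting the per-segment and whole-packet length accessors defined above.

static void walk_segments(struct rte_mbuf *m)
{
	uint32_t total = rte_pktmbuf_pkt_len(m);    /* whole chain */
	uint32_t sum = 0;
	struct rte_mbuf *seg;

	for (seg = m; seg != NULL; seg = seg->next)
		sum += rte_pktmbuf_data_len(seg);   /* this segment only */

	RTE_VERIFY(sum == total);
	RTE_VERIFY(rte_pktmbuf_lastseg(m)->next == NULL);
}
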
m->data_off = (uint16_t)(m->data_off - len);
return (char *)m->buf_addr + m->data_off;
return (char *)tail;
m->data_off = (uint16_t)(m->data_off + len);
return (char *)m->buf_addr + m->data_off;
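
A sketch pairing rte_pktmbuf_adj() and rte_pktmbuf_prepend(), the two headroom-manipulating helpers implemented above; stripping and restoring a 14-byte link-layer header is just an example.

#include <string.h>

#define EXAMPLE_HDR_LEN 14

static int strip_and_restore(struct rte_mbuf *m)
{
	char saved[EXAMPLE_HDR_LEN];
	char *p;

	if (rte_pktmbuf_data_len(m) < EXAMPLE_HDR_LEN)
		return -1;
	memcpy(saved, rte_pktmbuf_mtod(m, const char *), EXAMPLE_HDR_LEN);
	rte_pktmbuf_adj(m, EXAMPLE_HDR_LEN);          /* drop header from front */
	/* ... process the inner packet ... */
	p = rte_pktmbuf_prepend(m, EXAMPLE_HDR_LEN);  /* grow into headroom */
	if (p == NULL)
		return -1;
	memcpy(p, saved, EXAMPLE_HDR_LEN);
	return 0;
}
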
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);
uint32_t off, uint32_t len, void *buf)
return __rte_pktmbuf_read(m, off, len, buf);
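
A sketch of rte_pktmbuf_read(), which returns a pointer straight into the mbuf when the requested bytes are contiguous and otherwise copies them into the caller's buffer.

static int peek_16_bytes(const struct rte_mbuf *m, uint32_t off)
{
	uint8_t scratch[16];
	const uint8_t *p = rte_pktmbuf_read(m, off, sizeof(scratch), scratch);

	if (p == NULL)
		return -1;      /* packet shorter than off + 16 */
	/* p points either into the mbuf chain or at scratch[] */
	return p[0];
}
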
cur_tail->next = tail;
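
A sketch of joining two packets with rte_pktmbuf_chain(), whose tail-linking step is shown above.

static int join_packets(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	/* fails if the combined chain would exceed the segment limit;
	 * on success tail becomes part of head */
	if (rte_pktmbuf_chain(head, tail) != 0) {
		rte_pktmbuf_free(tail);
		return -1;
	}
	/* head->pkt_len now covers tail's data; do not free tail separately */
	return 0;
}
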
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2, uint64_t unused)
return il2 << RTE_MBUF_L2_LEN_OFS |
	il3 << RTE_MBUF_L3_LEN_OFS |
	il4 << RTE_MBUF_L4_LEN_OFS |
	tso << RTE_MBUF_TSO_SEGSZ_OFS |
	ol3 << RTE_MBUF_OUTL3_LEN_OFS |
	ol2 << RTE_MBUF_OUTL2_LEN_OFS |
	unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
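
A sketch of filling m->tx_offload with the helper above for an IPv4/TCP TSO request; the header lengths and segment size are example values, and the flag set follows the validation rule referenced just below.

static void request_tso(struct rte_mbuf *m)
{
	/* l2=14, l3=20, l4=20, TSO segment size 1448; outer lengths unused */
	m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 1448, 0, 0, 0);
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM | PKT_TX_IPV4;
}
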
!(ol_flags & PKT_TX_IP_CKSUM)))
int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);
return __rte_pktmbuf_linearize(mbuf);
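
A sketch of the linearization helper declared above.

static int make_contiguous(struct rte_mbuf *m)
{
	if (rte_pktmbuf_is_contiguous(m))
		return 0;
	/* coalesce the chain into the first segment; fails if the data
	 * does not fit in that segment's buffer */
	return rte_pktmbuf_linearize(m);
}
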
static inline uint32_t
return m->hash.sched.queue_id;
static inline uint8_t
return m->hash.sched.traffic_class;
static inline uint8_t
return m->hash.sched.color;
uint8_t *traffic_class,
*color = sched.color;
m->hash.sched.color = color;
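
A sketch of the hierarchical-scheduler metadata accessors implemented above; queue, traffic class and color values are arbitrary examples.

static void tag_for_scheduler(struct rte_mbuf *m)
{
	uint32_t queue;
	uint8_t tc, color;

	rte_mbuf_sched_set(m, 3, 1, 0);    /* queue 3, TC 1, color 0 */

	rte_mbuf_sched_get(m, &queue, &tc, &color);
	RTE_VERIFY(queue == 3 && tc == 1 && color == 0);
}
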
struct rte_mbuf_ext_shared_info * shinfo
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
uint16_t mbuf_data_room_size
#define __rte_always_inline
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
static void rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
static int rte_validate_tx_offload(const struct rte_mbuf *m)
__rte_experimental struct rte_mbuf * rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp, uint32_t offset, uint32_t length)
static void rte_pktmbuf_free(struct rte_mbuf *m)
#define PKT_TX_OUTER_IP_CKSUM
static uint32_t rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
#define RTE_PTR_ALIGN_FLOOR(ptr, align)
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
rte_mbuf_extbuf_free_callback_t free_cb
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
__rte_experimental void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count)
static uint8_t rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
static __rte_experimental char * rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
__rte_experimental int rte_mbuf_check(const struct rte_mbuf *m, int is_header, const char **reason)
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
#define PKT_TX_OUTER_IPV4
static uint16_t rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value)
#define IND_ATTACHED_MBUF
#define rte_pktmbuf_mtod_offset(m, t, o)
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
#define RTE_PTR_ADD(ptr, x)
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id, uint8_t traffic_class, uint8_t color)
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
static void rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id, uint8_t *traffic_class, uint8_t *color)
static void rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
static void rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
void(* rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque)
#define __rte_mbuf_sanity_check(m, is_h)
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
static uint32_t rte_pktmbuf_priv_flags(struct rte_mempool *mp)
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
static __rte_experimental void * rte_mbuf_to_priv(struct rte_mbuf *m)
#define RTE_MBUF_DIRECT(mb)
#define RTE_MBUF_CLONED(mb)
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
static void rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
static void rte_pktmbuf_detach(struct rte_mbuf *m)
static uint16_t rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
#define rte_pktmbuf_data_len(m)
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
#define MBUF_INVALID_PORT
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
#define RTE_MBUF_HAS_EXTBUF(mb)
struct rte_mempool * pool
static void rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value)
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
struct rte_mempool * rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
#define RTE_PTR_SUB(ptr, x)
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
static __rte_experimental char * rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
__rte_experimental struct rte_mempool * rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const struct rte_pktmbuf_extmem *ext_mem, unsigned int ext_num)
static uint8_t rte_mbuf_sched_color_get(const struct rte_mbuf *m)
#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF
static rte_iova_t rte_mempool_virt2iova(const void *elt)
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
#define RTE_PTR_DIFF(ptr1, ptr2)
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
rte_atomic16_t refcnt_atomic
#define PKT_TX_OFFLOAD_MASK
static void * rte_mempool_get_priv(struct rte_mempool *mp)
char name[RTE_MEMZONE_NAMESIZE]
#define RTE_MBUF_HAS_PINNED_EXTBUF(mb)
rte_atomic16_t refcnt_atomic
struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
#define EXT_ATTACHED_MBUF
static void rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
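
Taken together, a minimal end-to-end sketch of the mbuf lifecycle using the functions indexed above; the pool parameters and the 64-byte payload are example values.

#include <string.h>
#include <rte_lcore.h>

static int mbuf_lifecycle_example(void)
{
	struct rte_mempool *mp;
	struct rte_mbuf *m;
	char *data;

	mp = rte_pktmbuf_pool_create("example_pool", 1024, 32, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mp == NULL)
		return -1;

	m = rte_pktmbuf_alloc(mp);
	if (m == NULL)
		return -1;

	data = rte_pktmbuf_append(m, 64);   /* reserve 64 bytes of payload */
	if (data == NULL) {
		rte_pktmbuf_free(m);
		return -1;
	}
	memset(data, 0, 64);

	rte_pktmbuf_free(m);                /* returns every segment to its pool */
	return 0;
}
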