35 #include <rte_compat.h>
37 #include <rte_config.h>
/*
 * Packet RX offload flags, stored in the 64-bit mbuf ol_flags field
 * (low bits, counting up from 0). The numeric bit positions are ABI
 * shared with the PMDs that set them; do not renumber.
 *
 * Fix: stripped the line-number residue that a broken extraction fused
 * into every directive, and dropped the two byte-identical duplicate
 * definitions of PKT_RX_IP_CKSUM_BAD / PKT_RX_L4_CKSUM_BAD (each flag
 * is now defined exactly once, with the same value as before).
 */
#define PKT_RX_VLAN          (1ULL << 0) /**< RX packet is a 802.1q VLAN packet. */
#define PKT_RX_RSS_HASH      (1ULL << 1) /**< RX packet carries a valid RSS hash result. */
#define PKT_RX_FDIR          (1ULL << 2) /**< RX packet matched the flow director. */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3) /**< L4 checksum bad; one state of PKT_RX_L4_CKSUM_MASK. */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4) /**< IP checksum bad; one state of PKT_RX_IP_CKSUM_MASK. */
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5) /**< External (outer) IP header checksum bad. */
#define PKT_RX_VLAN_STRIPPED (1ULL << 6) /**< VLAN tag stripped by hardware into vlan_tci. */

/*
 * IP checksum status is a 2-bit field (bits 4 and 7). Mask ol_flags with
 * PKT_RX_IP_CKSUM_MASK and compare against the four states below.
 */
#define PKT_RX_IP_CKSUM_MASK    ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_IP_CKSUM_UNKNOWN 0                           /**< No IP checksum information. */
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)                 /**< IP checksum verified correct. */
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7)) /**< Checksum not computed/valid, packet usable. */

/*
 * L4 checksum status is the analogous 2-bit field at bits 3 and 8,
 * masked by PKT_RX_L4_CKSUM_MASK.
 */
#define PKT_RX_L4_CKSUM_MASK    ((1ULL << 3) | (1ULL << 8))
#define PKT_RX_L4_CKSUM_UNKNOWN 0                           /**< No L4 checksum information. */
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)                 /**< L4 checksum verified correct. */
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8)) /**< Checksum not computed/valid, packet usable. */

#define PKT_RX_IEEE1588_PTP       (1ULL << 9)  /**< RX packet is an IEEE 1588 PTP packet. */
#define PKT_RX_IEEE1588_TMST      (1ULL << 10) /**< Hardware registered an IEEE 1588 timestamp. */
/* Bits 11-12 are not defined in this range. */
#define PKT_RX_FDIR_ID            (1ULL << 13) /**< Flow-director reported an ID. */
#define PKT_RX_FDIR_FLX           (1ULL << 14) /**< Flow-director reported flexible bytes. */
#define PKT_RX_QINQ_STRIPPED      (1ULL << 15) /**< Outer QinQ VLAN tag stripped by hardware. */
#define PKT_RX_LRO                (1ULL << 16) /**< Packet aggregated by LRO. */
#define PKT_RX_TIMESTAMP          (1ULL << 17) /**< The mbuf timestamp field is valid. */
#define PKT_RX_SEC_OFFLOAD        (1ULL << 18) /**< Security offload was processed on RX. */
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19) /**< Security offload processing failed. */
#define PKT_RX_QINQ               (1ULL << 20) /**< Outer (QinQ) VLAN TCI is valid. */
/*
 * Packet TX offload request flags, stored in the 64-bit mbuf ol_flags
 * field (high bits, 42..60). Bit positions are ABI shared with the PMDs
 * that consume them; do not renumber.
 *
 * Fix: stripped the line-number residue that a broken extraction fused
 * into every directive; values are unchanged.
 */
#define PKT_TX_UDP_SEG     (1ULL << 42) /**< Request UDP segmentation offload. */
#define PKT_TX_SEC_OFFLOAD (1ULL << 43) /**< Request security offload on TX. */
#define PKT_TX_MACSEC      (1ULL << 44) /**< Request MACsec offload on TX. */

/*
 * The TX tunnel type is a 4-bit enumerated field occupying bits 45-48
 * (mask PKT_TX_TUNNEL_MASK) - an encoded value, not independent bits.
 */
#define PKT_TX_TUNNEL_VXLAN     (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE       (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP      (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE    (0x4ULL << 45)
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
#define PKT_TX_TUNNEL_IP        (0xDULL << 45) /**< Generic IP tunnel, specific type unknown. */
#define PKT_TX_TUNNEL_UDP       (0xEULL << 45) /**< Generic UDP tunnel, specific type unknown. */
#define PKT_TX_TUNNEL_MASK      (0xFULL << 45) /**< Mask of the whole tunnel-type field. */

#define PKT_TX_QINQ     (1ULL << 49) /**< Request insertion of an outer (QinQ) VLAN tag. */
#define PKT_TX_QINQ_PKT PKT_TX_QINQ  /**< Alias kept for backward compatibility. */

#define PKT_TX_TCP_SEG       (1ULL << 50) /**< Request TCP segmentation offload (TSO). */
#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< Request IEEE 1588 hardware timestamping. */

/*
 * The L4 checksum request is a 2-bit enumerated field at bits 52-53
 * (mask PKT_TX_L4_MASK); exactly one of the four states below applies.
 */
#define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< No L4 checksum offload requested. */
#define PKT_TX_TCP_CKSUM   (1ULL << 52) /**< Request TCP checksum offload. */
#define PKT_TX_SCTP_CKSUM  (2ULL << 52) /**< Request SCTP checksum offload. */
#define PKT_TX_UDP_CKSUM   (3ULL << 52) /**< Request UDP checksum offload. */
#define PKT_TX_L4_MASK     (3ULL << 52) /**< Mask of the L4 checksum request field. */

#define PKT_TX_IP_CKSUM (1ULL << 54) /**< Request IPv4 header checksum offload. */
#define PKT_TX_IPV4     (1ULL << 55) /**< Packet is IPv4 (hint for cksum/TSO offloads). */
#define PKT_TX_IPV6     (1ULL << 56) /**< Packet is IPv6. */

#define PKT_TX_VLAN     (1ULL << 57) /**< Request insertion of a VLAN tag. */
#define PKT_TX_VLAN_PKT PKT_TX_VLAN  /**< Alias kept for backward compatibility. */

#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58) /**< Request outer IPv4 header checksum offload. */
#define PKT_TX_OUTER_IPV4     (1ULL << 59) /**< Outer header is IPv4. */
#define PKT_TX_OUTER_IPV6     (1ULL << 60) /**< Outer header is IPv6. */
/*
 * NOTE(review): fragmentary excerpt of PKT_TX_OFFLOAD_MASK. Several OR'd
 * flag terms (original lines 340-344, 346-347, 349) and the closing ")"
 * are missing from this chunk, and each surviving line still carries the
 * original file line number fused in by the extraction. Kept byte-for-byte;
 * this cannot compile as-is - restore the full definition from the
 * complete rte_mbuf.h before building.
 */
336 #define PKT_TX_OFFLOAD_MASK ( \
337 PKT_TX_OUTER_IPV6 | \
338 PKT_TX_OUTER_IPV4 | \
339 PKT_TX_OUTER_IP_CKSUM | \
345 PKT_TX_IEEE1588_TMST | \
348 PKT_TX_TUNNEL_MASK | \
350 PKT_TX_SEC_OFFLOAD | \
/*
 * Mbuf attachment flags and default buffer sizing.
 * Fix: stripped the line-number residue that a broken extraction fused
 * into every directive; values are unchanged.
 */
/** The mbuf's data buffer is an attached external buffer. */
#define EXT_ATTACHED_MBUF (1ULL << 61)
/** Indirect mbuf: its data points into another (direct) mbuf's buffer. */
#define IND_ATTACHED_MBUF (1ULL << 62)
/** Alignment constraint of the mbuf private area, in bytes. */
#define RTE_MBUF_PRIV_ALIGN 8
/** Default size of the mbuf data room, in bytes. */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
/** Default buffer size: data room plus the configured headroom
 * (RTE_PKTMBUF_HEADROOM comes from rte_config.h). */
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
/*
 * NOTE(review): two isolated fragments of the struct rte_mbuf definition
 * (the enclosing struct is not in this chunk); the leading numbers are
 * extraction residue. Presumably MARKER fields delimiting cache-line /
 * descriptor regions of the struct - confirm against the full header.
 */
478 MARKER rx_descriptor_fields1;
553 MARKER cacheline1 __rte_cache_min_aligned;
/** Maximum number of segments a packet chain may have (fits a 16-bit counter).
 * Fix: stripped the extraction's fused line-number residue; value unchanged. */
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
/*
 * NOTE(review): non-contiguous fragments; enclosing function bodies and the
 * matching #endif are missing from this chunk, and the leading numbers are
 * extraction residue. Kept byte-for-byte.
 */
650 #if RTE_CACHE_LINE_SIZE == 64
/* Fragment: presumably the body of a data-IOVA accessor (buffer IOVA plus
 * current data offset) - confirm against the full header. */
671 return mb->buf_iova + mb->data_off;
/* Fragment: signature of the deprecated rte_mbuf_data_dma_addr() wrapper. */
676 rte_mbuf_data_dma_addr(
const struct rte_mbuf *mb)
/* Fragment: presumably the default-IOVA body (buffer IOVA plus headroom). */
696 return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
701 rte_mbuf_data_dma_addr_default(
const struct rte_mbuf *mb)
748 static inline void * __rte_experimental
/*
 * Mbuf kind test macros, keyed off the attachment bits in ol_flags.
 * Fix: stripped the extraction's fused line-number residue; the expansions
 * are unchanged.
 */
/** True if the mbuf is cloned (indirect): its data lives in another mbuf. */
#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
/** Alias of RTE_MBUF_CLONED(), kept for backward compatibility. */
#define RTE_MBUF_INDIRECT(mb) RTE_MBUF_CLONED(mb)
/** True if the mbuf carries an externally attached buffer. */
#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
/** True if the mbuf embeds its own data buffer: neither indirect nor
 * external-buffer attached. */
#define RTE_MBUF_DIRECT(mb) \
	(!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
/*
 * NOTE(review): non-contiguous fragments of the sanity-check macro pair,
 * the (atomic and non-atomic) refcount helpers, and the raw alloc/free
 * helpers. Bodies, #else/#endif lines and closing braces are missing from
 * this chunk; leading numbers are extraction residue. Kept byte-for-byte.
 */
796 #ifdef RTE_LIBRTE_MBUF_DEBUG
799 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
/* Non-debug variant: sanity check compiles away to a no-op. */
804 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
/* Fragments of the refcount helpers; two variants exist, selected by
 * RTE_MBUF_REFCNT_ATOMIC - the matching #else/#endif are not visible here. */
808 #ifdef RTE_MBUF_REFCNT_ATOMIC
817 static inline uint16_t
837 static inline uint16_t
838 __rte_mbuf_refcnt_update(
struct rte_mbuf *m, int16_t value)
852 static inline uint16_t
865 return (uint16_t)value;
868 return __rte_mbuf_refcnt_update(m, value);
874 static inline uint16_t
875 __rte_mbuf_refcnt_update(
struct rte_mbuf *m, int16_t value)
884 static inline uint16_t
887 return __rte_mbuf_refcnt_update(m, value);
893 static inline uint16_t
918 static inline uint16_t
950 static inline uint16_t
957 return (uint16_t)value;
/* Fragments: both macros below are truncated - the prefetch macro body and
 * the "} while (0)" closing of MBUF_RAW_ALLOC_CHECK are missing. The check
 * asserts a freshly raw-allocated mbuf has refcnt 1, no next, one segment. */
964 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
985 #define MBUF_RAW_ALLOC_CHECK(m) do { \
986 RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
987 RTE_ASSERT((m)->next == NULL); \
988 RTE_ASSERT((m)->nb_segs == 1); \
989 __rte_mbuf_sanity_check(m, 0); \
1017 MBUF_RAW_ALLOC_CHECK(m);
1040 RTE_ASSERT(m->
next == NULL);
1049 __rte_mbuf_raw_free(
struct rte_mbuf *m)
/*
 * NOTE(review): non-contiguous fragments of pool-init callback and
 * pool-create prototypes, plus one line of the headroom-reset logic.
 * Leading numbers are extraction residue; kept byte-for-byte.
 */
1074 void *m,
unsigned i);
1132 unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
1174 unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
1188 static inline uint16_t
1209 static inline uint16_t
/* Fragment: data_off is clamped so the headroom never exceeds the actual
 * buffer length - the rest of the RTE_MIN expression is missing here. */
1228 m->data_off = (uint16_t)
RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
/** Sentinel port value marking an mbuf as not associated with any port.
 * Fix: stripped the extraction's fused line-number residue; value unchanged. */
#define MBUF_INVALID_PORT UINT16_MAX
/*
 * NOTE(review): non-contiguous fragments of rte_pktmbuf_reset(), the single
 * alloc helper, and the bulk-alloc loop. Bodies and control flow between
 * the visible lines are missing; leading numbers are extraction residue.
 * Kept byte-for-byte.
 */
1242 static inline void rte_pktmbuf_reset(
struct rte_mbuf *m)
1277 rte_pktmbuf_reset(m);
1296 struct rte_mbuf **mbufs,
unsigned count)
/* Fragment: looks like a Duff's-device style 4-way unrolled bulk-alloc
 * loop (switch on count % 4 falling into the while) - the case labels and
 * index increments between these lines are not visible; confirm against
 * the full header. */
1310 switch (count % 4) {
1312 while (idx != count) {
1313 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1314 rte_pktmbuf_reset(mbufs[idx]);
1318 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1319 rte_pktmbuf_reset(mbufs[idx]);
1323 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1324 rte_pktmbuf_reset(mbufs[idx]);
1328 MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1329 rte_pktmbuf_reset(mbufs[idx]);
/*
 * NOTE(review): fragments of the external-buffer attach path (a bounds
 * check, an assertion that the shared-info free callback is set, and the
 * IOVA assignment). Enclosing functions are missing; leading numbers are
 * extraction residue. Kept byte-for-byte.
 */
1379 if (addr <= buf_addr)
1454 static inline void __rte_experimental
1461 RTE_ASSERT(shinfo->
free_cb != NULL);
1464 m->buf_iova = buf_iova;
/** Detach an mbuf from its external buffer; same operation as
 * rte_pktmbuf_detach(), provided as a named alias for extbuf users.
 * Fix: stripped the extraction's fused line-number residue; expansion unchanged. */
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
/*
 * NOTE(review): non-contiguous fragments of the attach (clone), detach,
 * extbuf/direct free helpers, prefree_seg and the segment-walk loops.
 * Most bodies and all closing braces are missing; leading numbers are
 * extraction residue; several statements are split mid-token across lines.
 * Kept byte-for-byte - restore from the full header before building.
 */
1520 mi->buf_iova = m->buf_iova;
1524 mi->data_off = m->data_off;
1550 __rte_pktmbuf_free_extbuf(
struct rte_mbuf *m)
1553 RTE_ASSERT(m->
shinfo != NULL);
1566 __rte_pktmbuf_free_direct(
struct rte_mbuf *m)
1597 uint32_t mbuf_size, buf_len;
1602 __rte_pktmbuf_free_extbuf(m);
1604 __rte_pktmbuf_free_direct(m);
1607 mbuf_size = (uint32_t)(
sizeof(
struct rte_mbuf) + priv_size);
1611 m->
buf_addr = (
char *)m + mbuf_size;
1613 m->
buf_len = (uint16_t)buf_len;
1643 if (m->
next != NULL) {
1650 }
else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1655 if (m->
next != NULL) {
1669 __rte_pktmbuf_prefree_seg(
struct rte_mbuf *m)
1751 }
while ((md = md->
next) != NULL &&
1785 }
while ((m = m->
next) != NULL);
1828 while (m->
next != NULL)
/*
 * Mbuf data accessor macros.
 * Fix: stripped the extraction's fused line-number residue, and wrapped the
 * rte_pktmbuf_iova_offset() expansion in an outer pair of parentheses so the
 * cast expression cannot bind unexpectedly at the use site (macro hygiene;
 * the computed value is unchanged).
 */
/** Pointer of type t to the packet data at byte offset o past data_off. */
#define rte_pktmbuf_mtod_offset(m, t, o) \
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/** Pointer of type t to the start of the packet data. */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/** IO address of the packet data at byte offset o. */
#define rte_pktmbuf_iova_offset(m, o) \
	((rte_iova_t)((m)->buf_iova + (m)->data_off + (o)))

/** Compatibility alias of rte_pktmbuf_iova_offset(). */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

/** IO address of the start of the packet data. */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/** Compatibility alias of rte_pktmbuf_iova(). */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

/** Total length of the packet (sum over all segments), in bytes. */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/** Length of the data in this segment, in bytes. */
#define rte_pktmbuf_data_len(m) ((m)->data_len)
/*
 * NOTE(review): non-contiguous fragments of the data-manipulation helpers
 * (prepend/append/adj), the out-of-line read prototype and its inline
 * fast-path wrapper, the chain helper, and the TX-prepare checks. Bodies
 * and surrounding control flow are missing; leading numbers are extraction
 * residue; several statements are split mid-token. Kept byte-for-byte.
 */
/* Fragment: presumably prepend - data_off shrinks by len, returning the new
 * data start; confirm against the full header. */
1938 m->data_off = (uint16_t)(m->data_off - len);
1942 return (
char *)m->
buf_addr + m->data_off;
1974 return (
char*) tail;
/* Fragment: presumably adj - data_off grows by len. */
2002 m->data_off = (uint16_t)(m->data_off + len);
2004 return (
char *)m->
buf_addr + m->data_off;
2054 const void *__rte_pktmbuf_read(
const struct rte_mbuf *m, uint32_t off,
2055 uint32_t len,
void *buf);
2078 uint32_t off, uint32_t len,
void *buf)
2083 return __rte_pktmbuf_read(m, off, len, buf);
/* Fragment: chaining - the current tail's next pointer is linked to the
 * appended chain. */
2112 cur_tail->
next = tail;
2141 uint64_t inner_l3_offset = m->
l2_len;
/* Fragments of the TX offload validation conditions. */
2171 !(ol_flags & PKT_TX_IP_CKSUM)))
2175 if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2197 size_t seg_len, copy_len;