#include <rte_compat.h>
#include <rte_config.h>
#define PKT_RX_VLAN (1ULL << 0)
#define PKT_RX_RSS_HASH (1ULL << 1)
#define PKT_RX_FDIR (1ULL << 2)
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
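/*
 * Illustrative sketch, not part of the original header: the IP and L4
 * checksum statuses each occupy two bits of ol_flags, so applications should
 * compare against the *_MASK values rather than test single bits. The helper
 * name below is hypothetical.
 */
static inline int
example_rx_cksum_ok(uint64_t ol_flags)
{
        if ((ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
                return 0;
        if ((ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
                return 0;
        return 1;
}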
#define PKT_RX_IEEE1588_PTP (1ULL << 9)
#define PKT_RX_IEEE1588_TMST (1ULL << 10)
#define PKT_RX_FDIR_ID (1ULL << 13)
#define PKT_RX_FDIR_FLX (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
#define PKT_RX_LRO (1ULL << 16)
#define PKT_RX_TIMESTAMP (1ULL << 17)
#define PKT_RX_SEC_OFFLOAD (1ULL << 18)
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
#define PKT_RX_QINQ (1ULL << 20)
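/*
 * Illustrative sketch, not part of the original header: PKT_RX_VLAN means
 * vlan_tci has been populated, while PKT_RX_VLAN_STRIPPED additionally means
 * the tag was removed from the packet data by the hardware. The helper name
 * is hypothetical.
 */
static inline int
example_rx_vlan_stripped(uint64_t ol_flags)
{
        return (ol_flags & (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED)) ==
                (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
}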
#define PKT_TX_UDP_SEG (1ULL << 42)
#define PKT_TX_SEC_OFFLOAD (1ULL << 43)
#define PKT_TX_MACSEC (1ULL << 44)
#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
#define PKT_TX_TUNNEL_IP (0xDULL << 45)
#define PKT_TX_TUNNEL_UDP (0xEULL << 45)
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
#define PKT_TX_QINQ (1ULL << 49)
#define PKT_TX_QINQ_PKT PKT_TX_QINQ
#define PKT_TX_TCP_SEG (1ULL << 50)
#define PKT_TX_IEEE1588_TMST (1ULL << 51)
#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
#define PKT_TX_TCP_CKSUM (1ULL << 52)
#define PKT_TX_SCTP_CKSUM (2ULL << 52)
#define PKT_TX_UDP_CKSUM (3ULL << 52)
#define PKT_TX_L4_MASK (3ULL << 52)
#define PKT_TX_IP_CKSUM (1ULL << 54)
#define PKT_TX_IPV4 (1ULL << 55)
#define PKT_TX_IPV6 (1ULL << 56)
#define PKT_TX_VLAN (1ULL << 57)
#define PKT_TX_VLAN_PKT PKT_TX_VLAN
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
#define PKT_TX_OUTER_IPV4 (1ULL << 59)
#define PKT_TX_OUTER_IPV6 (1ULL << 60)
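/*
 * Illustrative sketch, not part of the original header: composing Tx offload
 * flags for an IPv4/TCP packet whose IP and TCP checksums are to be computed
 * by hardware. The application must also fill l2_len/l3_len in the mbuf and,
 * for L4 offload, usually pre-fill the TCP checksum field with the
 * pseudo-header checksum (e.g. rte_ipv4_phdr_cksum() from rte_ip.h). The
 * helper name is hypothetical.
 */
static inline uint64_t
example_tx_ipv4_tcp_cksum_flags(void)
{
        /* PKT_TX_L4_MASK is a 2-bit field: request only one L4 checksum
         * type at a time. */
        return PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}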
#define PKT_TX_OFFLOAD_MASK ( \
        PKT_TX_OUTER_IP_CKSUM | \
        PKT_TX_IEEE1588_TMST | \
        PKT_TX_TUNNEL_MASK | \
#define EXT_ATTACHED_MBUF (1ULL << 61)
#define IND_ATTACHED_MBUF (1ULL << 62)
#define RTE_MBUF_PRIV_ALIGN 8
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
        (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
MARKER rx_descriptor_fields1;
MARKER cacheline1 __rte_cache_min_aligned;
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
#if RTE_CACHE_LINE_SIZE == 64
return mb->buf_iova + mb->data_off;
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
#define RTE_MBUF_INDIRECT(mb) RTE_MBUF_CLONED(mb)
#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
#define RTE_MBUF_DIRECT(mb) \
        (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
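/*
 * Illustrative sketch, not part of the original header: an mbuf is "direct"
 * only when it references neither another mbuf's buffer (indirect) nor an
 * external buffer, so RTE_MBUF_DIRECT() is not simply the negation of
 * RTE_MBUF_INDIRECT(). The helper name is hypothetical.
 */
static inline int
example_mbuf_owns_buffer(const struct rte_mbuf *m)
{
        /* False for both IND_ATTACHED_MBUF and EXT_ATTACHED_MBUF mbufs. */
        return RTE_MBUF_DIRECT(m);
}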
#ifdef RTE_LIBRTE_MBUF_DEBUG
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
#ifdef RTE_MBUF_REFCNT_ATOMIC
static inline uint16_t
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
static inline uint16_t
return (uint16_t)value;
return __rte_mbuf_refcnt_update(m, value);
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
static inline uint16_t
return __rte_mbuf_refcnt_update(m, value);
static inline uint16_t
static inline uint16_t
static inline uint16_t
return (uint16_t)value;
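/*
 * Illustrative sketch, not part of the original header: taking an extra
 * reference on an mbuf so that a later rte_pktmbuf_free() only drops the
 * count instead of returning the buffer to its pool. The helper name is
 * hypothetical.
 */
static inline uint16_t
example_mbuf_hold(struct rte_mbuf *m)
{
        rte_mbuf_refcnt_update(m, 1);
        return rte_mbuf_refcnt_read(m);
}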
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
#define MBUF_RAW_ALLOC_CHECK(m) do { \
        RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
        RTE_ASSERT((m)->next == NULL); \
        RTE_ASSERT((m)->nb_segs == 1); \
        __rte_mbuf_sanity_check(m, 0); \
MBUF_RAW_ALLOC_CHECK(m);
RTE_ASSERT(m->next == NULL);
__rte_mbuf_raw_free(struct rte_mbuf *m)
void *m, unsigned i);
unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
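/*
 * Illustrative sketch, not part of the original header: creating a pool of
 * mbufs whose data rooms use RTE_MBUF_DEFAULT_BUF_SIZE. The pool name and
 * sizes are arbitrary, and SOCKET_ID_ANY (defined in rte_memory.h) is
 * assumed to be visible through this header's includes. The helper name is
 * hypothetical.
 */
static inline struct rte_mempool *
example_create_mbuf_pool(void)
{
        return rte_pktmbuf_pool_create("example_pool", 8192, 256, 0,
                RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
}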
static inline uint16_t
static inline uint16_t
m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
#define MBUF_INVALID_PORT UINT16_MAX
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
rte_pktmbuf_reset(m);
struct rte_mbuf **mbufs, unsigned count)
switch (count % 4) {
while (idx != count) {
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
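/*
 * Illustrative sketch, not part of the original header: allocating a burst
 * of mbufs in one call. rte_pktmbuf_alloc_bulk() returns 0 on success and a
 * negative value when the pool cannot supply all of them. The helper name
 * and the burst size of 32 are arbitrary.
 */
static inline int
example_alloc_burst(struct rte_mempool *mp, struct rte_mbuf *pkts[32])
{
        return rte_pktmbuf_alloc_bulk(mp, pkts, 32);
}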
if (addr <= buf_addr)
static inline void __rte_experimental
RTE_ASSERT(shinfo->free_cb != NULL);
m->buf_iova = buf_iova;
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
mi->buf_iova = m->buf_iova;
mi->data_off = m->data_off;
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
RTE_ASSERT(m->shinfo != NULL);
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
uint32_t mbuf_size, buf_len;
__rte_pktmbuf_free_extbuf(m);
__rte_pktmbuf_free_direct(m);
mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
m->buf_addr = (char *)m + mbuf_size;
m->buf_len = (uint16_t)buf_len;
if (m->next != NULL) {
} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
if (m->next != NULL) {
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
} while ((md = md->next) != NULL &&
} while ((m = m->next) != NULL);
while (m->next != NULL)
#define rte_pktmbuf_mtod_offset(m, t, o) \
        ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
#define rte_pktmbuf_iova_offset(m, o) \
        (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
#define rte_pktmbuf_mtophys_offset(m, o) \
        rte_pktmbuf_iova_offset(m, o)
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
#define rte_pktmbuf_data_len(m) ((m)->data_len)
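/*
 * Illustrative sketch, not part of the original header: using
 * rte_pktmbuf_mtod_offset() to locate the L3 header of a packet whose L2
 * header length was stored in m->l2_len. The helper name is hypothetical.
 */
static inline void *
example_l3_header(const struct rte_mbuf *m)
{
        /* Only valid when the L2 and L3 headers sit in the first segment. */
        return rte_pktmbuf_mtod_offset(m, void *, m->l2_len);
}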
m->data_off = (uint16_t)(m->data_off - len);
return (char *)m->buf_addr + m->data_off;
return (char *)tail;
m->data_off = (uint16_t)(m->data_off + len);
return (char *)m->buf_addr + m->data_off;
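/*
 * Illustrative sketch, not part of the original header: growing a packet at
 * the front with rte_pktmbuf_prepend() (consumes headroom) and trimming it
 * at the front with rte_pktmbuf_adj(); both return NULL when the first
 * segment cannot satisfy the request. Helper names are hypothetical.
 */
static inline char *
example_push_encap_header(struct rte_mbuf *m, uint16_t hdr_len)
{
        return rte_pktmbuf_prepend(m, hdr_len);
}

static inline char *
example_strip_outer_header(struct rte_mbuf *m, uint16_t hdr_len)
{
        return rte_pktmbuf_adj(m, hdr_len);
}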
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
        uint32_t len, void *buf);
uint32_t off, uint32_t len, void *buf)
return __rte_pktmbuf_read(m, off, len, buf);
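/*
 * Illustrative sketch, not part of the original header: rte_pktmbuf_read()
 * returns a pointer into the mbuf when the requested range is contiguous in
 * the first segment, otherwise it copies the range into the caller's buffer
 * and returns that buffer; NULL means the range is out of bounds. The helper
 * name is hypothetical.
 */
static inline const void *
example_peek_bytes(const struct rte_mbuf *m, uint32_t off, uint32_t len,
        void *scratch)
{
        /* 'scratch' must be at least 'len' bytes long. */
        return rte_pktmbuf_read(m, off, len, scratch);
}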
cur_tail->next = tail;
uint64_t inner_l3_offset = m->l2_len;
!(ol_flags & PKT_TX_IP_CKSUM)))
if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
size_t seg_len, copy_len;
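/*
 * Illustrative sketch, not part of the original header: linking one packet
 * after another with rte_pktmbuf_chain(), which returns 0 on success or
 * -EOVERFLOW when the resulting chain would exceed the segment limit. The
 * helper name is hypothetical.
 */
static inline int
example_append_packet(struct rte_mbuf *head, struct rte_mbuf *tail)
{
        /* On success, head's pkt_len and nb_segs are updated to cover tail. */
        return rte_pktmbuf_chain(head, tail);
}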