DPDK 18.11.11 — rte_mbuf.h (packet buffer / mbuf API header, Doxygen source listing)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5 
6 #ifndef _RTE_MBUF_H_
7 #define _RTE_MBUF_H_
8 
34 #include <stdint.h>
35 #include <rte_compat.h>
36 #include <rte_common.h>
37 #include <rte_config.h>
38 #include <rte_mempool.h>
39 #include <rte_memory.h>
40 #include <rte_atomic.h>
41 #include <rte_prefetch.h>
42 #include <rte_branch_prediction.h>
43 #include <rte_mbuf_ptype.h>
44 
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48 
49 /*
50  * Packet Offload Features Flags. It also carry packet type information.
51  * Critical resources. Both rx/tx shared these bits. Be cautious on any change
52  *
53  * - RX flags start at bit position zero, and get added to the left of previous
54  * flags.
55  * - The most-significant 3 bits are reserved for generic mbuf flags
56  * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
57  * added to the right of the previously defined flags i.e. they should count
58  * downwards, not upwards.
59  *
60  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
61  * rte_get_tx_ol_flag_name().
62  */
63 
71 #define PKT_RX_VLAN (1ULL << 0)
72 
73 #define PKT_RX_RSS_HASH (1ULL << 1)
74 #define PKT_RX_FDIR (1ULL << 2)
83 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
84 
92 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
93 
94 #define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
102 #define PKT_RX_VLAN_STRIPPED (1ULL << 6)
103 
112 #define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
113 
114 #define PKT_RX_IP_CKSUM_UNKNOWN 0
115 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
116 #define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
117 #define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
118 
127 #define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
128 
129 #define PKT_RX_L4_CKSUM_UNKNOWN 0
130 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
131 #define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
132 #define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
133 
134 #define PKT_RX_IEEE1588_PTP (1ULL << 9)
135 #define PKT_RX_IEEE1588_TMST (1ULL << 10)
136 #define PKT_RX_FDIR_ID (1ULL << 13)
137 #define PKT_RX_FDIR_FLX (1ULL << 14)
147 #define PKT_RX_QINQ_STRIPPED (1ULL << 15)
148 
154 #define PKT_RX_LRO (1ULL << 16)
155 
159 #define PKT_RX_TIMESTAMP (1ULL << 17)
160 
164 #define PKT_RX_SEC_OFFLOAD (1ULL << 18)
165 
169 #define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
170 
179 #define PKT_RX_QINQ (1ULL << 20)
180 
193 #define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
194 
195 #define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0
196 #define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
197 #define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
198 #define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))
199 
200 /* add new RX flags here */
201 
202 /* add new TX flags here */
203 
207 #define PKT_TX_METADATA (1ULL << 40)
208 
218 #define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)
219 
225 #define PKT_TX_UDP_SEG (1ULL << 42)
226 
230 #define PKT_TX_SEC_OFFLOAD (1ULL << 43)
231 
236 #define PKT_TX_MACSEC (1ULL << 44)
237 
246 #define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
247 #define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
248 #define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
249 #define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
250 
251 #define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
252 #define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
253 
264 #define PKT_TX_TUNNEL_IP (0xDULL << 45)
265 
277 #define PKT_TX_TUNNEL_UDP (0xEULL << 45)
278 /* add new TX TUNNEL type here */
279 #define PKT_TX_TUNNEL_MASK (0xFULL << 45)
280 
286 #define PKT_TX_QINQ (1ULL << 49)
287 /* this old name is deprecated */
288 #define PKT_TX_QINQ_PKT PKT_TX_QINQ
289 
299 #define PKT_TX_TCP_SEG (1ULL << 50)
300 
301 #define PKT_TX_IEEE1588_TMST (1ULL << 51)
311 #define PKT_TX_L4_NO_CKSUM (0ULL << 52)
312 #define PKT_TX_TCP_CKSUM (1ULL << 52)
313 #define PKT_TX_SCTP_CKSUM (2ULL << 52)
314 #define PKT_TX_UDP_CKSUM (3ULL << 52)
315 #define PKT_TX_L4_MASK (3ULL << 52)
323 #define PKT_TX_IP_CKSUM (1ULL << 54)
324 
331 #define PKT_TX_IPV4 (1ULL << 55)
332 
339 #define PKT_TX_IPV6 (1ULL << 56)
340 
346 #define PKT_TX_VLAN (1ULL << 57)
347 /* this old name is deprecated */
348 #define PKT_TX_VLAN_PKT PKT_TX_VLAN
349 
356 #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
357 
363 #define PKT_TX_OUTER_IPV4 (1ULL << 59)
364 
370 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
371 
376 #define PKT_TX_OFFLOAD_MASK ( \
377  PKT_TX_OUTER_IPV6 | \
378  PKT_TX_OUTER_IPV4 | \
379  PKT_TX_OUTER_IP_CKSUM | \
380  PKT_TX_VLAN_PKT | \
381  PKT_TX_IPV6 | \
382  PKT_TX_IPV4 | \
383  PKT_TX_IP_CKSUM | \
384  PKT_TX_L4_MASK | \
385  PKT_TX_IEEE1588_TMST | \
386  PKT_TX_TCP_SEG | \
387  PKT_TX_QINQ_PKT | \
388  PKT_TX_TUNNEL_MASK | \
389  PKT_TX_MACSEC | \
390  PKT_TX_SEC_OFFLOAD | \
391  PKT_TX_UDP_SEG | \
392  PKT_TX_OUTER_UDP_CKSUM | \
393  PKT_TX_METADATA)
394 
398 #define EXT_ATTACHED_MBUF (1ULL << 61)
399 
400 #define IND_ATTACHED_MBUF (1ULL << 62)
403 #define RTE_MBUF_PRIV_ALIGN 8
404 
413 const char *rte_get_rx_ol_flag_name(uint64_t mask);
414 
427 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
428 
439 const char *rte_get_tx_ol_flag_name(uint64_t mask);
440 
453 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
454 
461 #define RTE_MBUF_DEFAULT_DATAROOM 2048
462 #define RTE_MBUF_DEFAULT_BUF_SIZE \
463  (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
464 
465 /* define a set of marker types that can be used to refer to set points in the
466  * mbuf */
467 __extension__
468 typedef void *MARKER[0];
469 __extension__
470 typedef uint8_t MARKER8[0];
471 __extension__
472 typedef uint64_t MARKER64[0];
478 struct rte_mbuf {
479  MARKER cacheline0;
480 
481  void *buf_addr;
489  union {
490  rte_iova_t buf_iova;
492  } __rte_aligned(sizeof(rte_iova_t));
493 
494  /* next 8 bytes are initialised on RX descriptor rearm */
495  MARKER64 rearm_data;
496  uint16_t data_off;
497 
508  union {
510  uint16_t refcnt;
511  };
512  uint16_t nb_segs;
517  uint16_t port;
518 
519  uint64_t ol_flags;
521  /* remaining bytes are set on RX when pulling packet from descriptor */
522  MARKER rx_descriptor_fields1;
523 
524  /*
525  * The packet type, which is the combination of outer/inner L2, L3, L4
526  * and tunnel types. The packet_type is about data really present in the
527  * mbuf. Example: if vlan stripping is enabled, a received vlan packet
528  * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
529  * vlan is stripped from the data.
530  */
532  union {
533  uint32_t packet_type;
534  struct {
535  uint32_t l2_type:4;
536  uint32_t l3_type:4;
537  uint32_t l4_type:4;
538  uint32_t tun_type:4;
540  union {
546  __extension__
547  struct {
548  uint8_t inner_l2_type:4;
550  uint8_t inner_l3_type:4;
552  };
553  };
554  uint32_t inner_l4_type:4;
555  };
556  };
557 
558  uint32_t pkt_len;
559  uint16_t data_len;
561  uint16_t vlan_tci;
562 
564  union {
565  union {
566  uint32_t rss;
567  struct {
568  union {
569  struct {
570  uint16_t hash;
571  uint16_t id;
572  };
573  uint32_t lo;
575  };
576  uint32_t hi;
580  } fdir;
581  struct {
582  uint32_t lo;
583  uint32_t hi;
588  } sched;
590  uint32_t usr;
591  } hash;
592  struct {
600  uint32_t tx_metadata;
601  uint32_t reserved;
602  };
603  };
604 
606  uint16_t vlan_tci_outer;
607 
608  uint16_t buf_len;
613  uint64_t timestamp;
614 
615  /* second cache line - fields only used in slow path or on TX */
616  MARKER cacheline1 __rte_cache_min_aligned;
617 
619  union {
620  void *userdata;
621  uint64_t udata64;
622  };
623 
624  struct rte_mempool *pool;
625  struct rte_mbuf *next;
627  /* fields to support TX offloads */
629  union {
630  uint64_t tx_offload;
631  __extension__
632  struct {
633  uint64_t l2_len:7;
637  uint64_t l3_len:9;
638  uint64_t l4_len:8;
639  uint64_t tso_segsz:16;
641  /* fields for TX offloading of tunnels */
642  uint64_t outer_l3_len:9;
643  uint64_t outer_l2_len:7;
645  /* uint64_t unused:8; */
646  };
647  };
648 
651  uint16_t priv_size;
652 
654  uint16_t timesync;
655 
657  uint32_t seqn;
658 
663 
665 
669 typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);
670 
676  void *fcb_opaque;
678 };
679 
681 #define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
682 
693 static inline void
695 {
696  rte_prefetch0(&m->cacheline0);
697 }
698 
710 static inline void
712 {
713 #if RTE_CACHE_LINE_SIZE == 64
714  rte_prefetch0(&m->cacheline1);
715 #else
716  RTE_SET_USED(m);
717 #endif
718 }
719 
720 
721 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
722 
731 static inline rte_iova_t
732 rte_mbuf_data_iova(const struct rte_mbuf *mb)
733 {
734  return mb->buf_iova + mb->data_off;
735 }
736 
737 __rte_deprecated
738 static inline phys_addr_t
739 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
740 {
741  return rte_mbuf_data_iova(mb);
742 }
743 
756 static inline rte_iova_t
758 {
759  return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
760 }
761 
762 __rte_deprecated
763 static inline phys_addr_t
764 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
765 {
766  return rte_mbuf_data_iova_default(mb);
767 }
768 
777 static inline struct rte_mbuf *
779 {
780  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
781 }
782 
791 static inline char *
793 {
794  char *buffer_addr;
795  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
796  return buffer_addr;
797 }
798 
811 static inline void * __rte_experimental
813 {
814  return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
815 }
816 
824 #define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
825 
830 #define RTE_MBUF_INDIRECT(mb) RTE_MBUF_CLONED(mb)
831 
837 #define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
838 
845 #define RTE_MBUF_DIRECT(mb) \
846  (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
847 
856  uint16_t mbuf_priv_size;
857 };
858 
859 #ifdef RTE_LIBRTE_MBUF_DEBUG
860 
862 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
863 
864 #else /* RTE_LIBRTE_MBUF_DEBUG */
865 
867 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
868 
869 #endif /* RTE_LIBRTE_MBUF_DEBUG */
870 
871 #ifdef RTE_MBUF_REFCNT_ATOMIC
872 
880 static inline uint16_t
881 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
882 {
883  return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
884 }
885 
893 static inline void
894 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
895 {
896  rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
897 }
898 
899 /* internal */
900 static inline uint16_t
901 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
902 {
903  return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
904 }
905 
915 static inline uint16_t
916 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
917 {
918  /*
919  * The atomic_add is an expensive operation, so we don't want to
920  * call it in the case where we know we are the unique holder of
921  * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
922  * operation has to be used because concurrent accesses on the
923  * reference counter can occur.
924  */
925  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
926  ++value;
927  rte_mbuf_refcnt_set(m, (uint16_t)value);
928  return (uint16_t)value;
929  }
930 
931  return __rte_mbuf_refcnt_update(m, value);
932 }
933 
934 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
935 
936 /* internal */
937 static inline uint16_t
938 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
939 {
940  m->refcnt = (uint16_t)(m->refcnt + value);
941  return m->refcnt;
942 }
943 
947 static inline uint16_t
948 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
949 {
950  return __rte_mbuf_refcnt_update(m, value);
951 }
952 
956 static inline uint16_t
958 {
959  return m->refcnt;
960 }
961 
965 static inline void
966 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
967 {
968  m->refcnt = new_value;
969 }
970 
971 #endif /* RTE_MBUF_REFCNT_ATOMIC */
972 
981 static inline uint16_t
983 {
984  return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
985 }
986 
995 static inline void
997  uint16_t new_value)
998 {
999  rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
1000 }
1001 
1013 static inline uint16_t
1015  int16_t value)
1016 {
1017  if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
1018  ++value;
1019  rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
1020  return (uint16_t)value;
1021  }
1022 
1023  return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
1024 }
1025 
1027 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
1028  if ((m) != NULL) \
1029  rte_prefetch0(m); \
1030 } while (0)
1031 
1032 
1045 void
1046 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
1047 
1048 #define MBUF_RAW_ALLOC_CHECK(m) do { \
1049  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
1050  RTE_ASSERT((m)->next == NULL); \
1051  RTE_ASSERT((m)->nb_segs == 1); \
1052  __rte_mbuf_sanity_check(m, 0); \
1053 } while (0)
1054 
1074 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
1075 {
1076  struct rte_mbuf *m;
1077 
1078  if (rte_mempool_get(mp, (void **)&m) < 0)
1079  return NULL;
1080  MBUF_RAW_ALLOC_CHECK(m);
1081  return m;
1082 }
1083 
1098 static __rte_always_inline void
1100 {
1101  RTE_ASSERT(RTE_MBUF_DIRECT(m));
1102  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
1103  RTE_ASSERT(m->next == NULL);
1104  RTE_ASSERT(m->nb_segs == 1);
1106  rte_mempool_put(m->pool, m);
1107 }
1108 
1128 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
1129  void *m, unsigned i);
1130 
1131 
1149 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
1150 
1185 struct rte_mempool *
1186 rte_pktmbuf_pool_create(const char *name, unsigned n,
1187  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
1188  int socket_id);
1189 
1227 struct rte_mempool *
1228 rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
1229  unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
1230  int socket_id, const char *ops_name);
1231 
1243 static inline uint16_t
1245 {
1246  struct rte_pktmbuf_pool_private *mbp_priv;
1247 
1248  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1249  return mbp_priv->mbuf_data_room_size;
1250 }
1251 
1264 static inline uint16_t
1266 {
1267  struct rte_pktmbuf_pool_private *mbp_priv;
1268 
1269  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1270  return mbp_priv->mbuf_priv_size;
1271 }
1272 
1281 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
1282 {
1283  m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
1284  (uint16_t)m->buf_len);
1285 }
1286 
1295 #define MBUF_INVALID_PORT UINT16_MAX
1296 
1297 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1298 {
1299  m->next = NULL;
1300  m->pkt_len = 0;
1301  m->tx_offload = 0;
1302  m->vlan_tci = 0;
1303  m->vlan_tci_outer = 0;
1304  m->nb_segs = 1;
1305  m->port = MBUF_INVALID_PORT;
1306 
1307  m->ol_flags = 0;
1308  m->packet_type = 0;
1310 
1311  m->data_len = 0;
1313 }
1314 
1328 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1329 {
1330  struct rte_mbuf *m;
1331  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1332  rte_pktmbuf_reset(m);
1333  return m;
1334 }
1335 
/**
 * Allocate a bulk of mbufs and reset each to default values.
 *
 * @param pool
 *   The mempool from which mbufs are allocated.
 * @param mbufs
 *   Array of pointers to fill with the allocated mbufs.
 * @param count
 *   Number of mbufs to allocate.
 * @return
 *   0 on success, or the (negative) rte_mempool_get_bulk() error code;
 *   on failure no mbufs are allocated.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here while() loop is used rather than do() while{} to avoid extra
	 * check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}
1391 
1424 static inline struct rte_mbuf_ext_shared_info *
1425 rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
1427 {
1428  struct rte_mbuf_ext_shared_info *shinfo;
1429  void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
1430  void *addr;
1431 
1432  addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
1433  sizeof(uintptr_t));
1434  if (addr <= buf_addr)
1435  return NULL;
1436 
1437  shinfo = (struct rte_mbuf_ext_shared_info *)addr;
1438  shinfo->free_cb = free_cb;
1439  shinfo->fcb_opaque = fcb_opaque;
1440  rte_mbuf_ext_refcnt_set(shinfo, 1);
1441 
1442  *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
1443  return shinfo;
1444 }
1445 
1509 static inline void __rte_experimental
1510 rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
1511  rte_iova_t buf_iova, uint16_t buf_len,
1512  struct rte_mbuf_ext_shared_info *shinfo)
1513 {
1514  /* mbuf should not be read-only */
1515  RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
1516  RTE_ASSERT(shinfo->free_cb != NULL);
1517 
1518  m->buf_addr = buf_addr;
1519  m->buf_iova = buf_iova;
1520  m->buf_len = buf_len;
1521 
1522  m->data_len = 0;
1523  m->data_off = 0;
1524 
1526  m->shinfo = shinfo;
1527 }
1528 
1536 #define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
1537 
1559 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1560 {
1561  RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1562  rte_mbuf_refcnt_read(mi) == 1);
1563 
1564  if (RTE_MBUF_HAS_EXTBUF(m)) {
1566  mi->ol_flags = m->ol_flags;
1567  mi->shinfo = m->shinfo;
1568  } else {
1569  /* if m is not direct, get the mbuf that embeds the data */
1571  mi->priv_size = m->priv_size;
1572  mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1573  }
1574 
1575  mi->buf_iova = m->buf_iova;
1576  mi->buf_addr = m->buf_addr;
1577  mi->buf_len = m->buf_len;
1578 
1579  mi->data_off = m->data_off;
1580  mi->data_len = m->data_len;
1581  mi->port = m->port;
1582  mi->vlan_tci = m->vlan_tci;
1583  mi->vlan_tci_outer = m->vlan_tci_outer;
1584  mi->tx_offload = m->tx_offload;
1585  mi->hash = m->hash;
1586 
1587  mi->next = NULL;
1588  mi->pkt_len = mi->data_len;
1589  mi->nb_segs = 1;
1590  mi->packet_type = m->packet_type;
1591  mi->timestamp = m->timestamp;
1592 
1593  __rte_mbuf_sanity_check(mi, 1);
1595 }
1596 
1604 static inline void
1605 __rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
1606 {
1607  RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
1608  RTE_ASSERT(m->shinfo != NULL);
1609 
1610  if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
1611  m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
1612 }
1613 
1620 static inline void
1621 __rte_pktmbuf_free_direct(struct rte_mbuf *m)
1622 {
1623  struct rte_mbuf *md;
1624 
1625  RTE_ASSERT(RTE_MBUF_INDIRECT(m));
1626 
1627  md = rte_mbuf_from_indirect(m);
1628 
1629  if (rte_mbuf_refcnt_update(md, -1) == 0) {
1630  md->next = NULL;
1631  md->nb_segs = 1;
1632  rte_mbuf_refcnt_set(md, 1);
1633  rte_mbuf_raw_free(md);
1634  }
1635 }
1636 
1650 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1651 {
1652  struct rte_mempool *mp = m->pool;
1653  uint32_t mbuf_size, buf_len;
1654  uint16_t priv_size;
1655 
1656  if (RTE_MBUF_HAS_EXTBUF(m))
1657  __rte_pktmbuf_free_extbuf(m);
1658  else
1659  __rte_pktmbuf_free_direct(m);
1660 
1661  priv_size = rte_pktmbuf_priv_size(mp);
1662  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1663  buf_len = rte_pktmbuf_data_room_size(mp);
1664 
1665  m->priv_size = priv_size;
1666  m->buf_addr = (char *)m + mbuf_size;
1667  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1668  m->buf_len = (uint16_t)buf_len;
1670  m->data_len = 0;
1671  m->ol_flags = 0;
1672 }
1673 
1688 static __rte_always_inline struct rte_mbuf *
1690 {
1692 
1693  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1694 
1695  if (!RTE_MBUF_DIRECT(m))
1696  rte_pktmbuf_detach(m);
1697 
1698  if (m->next != NULL) {
1699  m->next = NULL;
1700  m->nb_segs = 1;
1701  }
1702 
1703  return m;
1704 
1705  } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1706 
1707  if (!RTE_MBUF_DIRECT(m))
1708  rte_pktmbuf_detach(m);
1709 
1710  if (m->next != NULL) {
1711  m->next = NULL;
1712  m->nb_segs = 1;
1713  }
1714  rte_mbuf_refcnt_set(m, 1);
1715 
1716  return m;
1717  }
1718  return NULL;
1719 }
1720 
1730 static __rte_always_inline void
1732 {
1733  m = rte_pktmbuf_prefree_seg(m);
1734  if (likely(m != NULL))
1735  rte_mbuf_raw_free(m);
1736 }
1737 
1747 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1748 {
1749  struct rte_mbuf *m_next;
1750 
1751  if (m != NULL)
1753 
1754  while (m != NULL) {
1755  m_next = m->next;
1757  m = m_next;
1758  }
1759 }
1760 
1778 static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
1779  struct rte_mempool *mp)
1780 {
1781  struct rte_mbuf *mc, *mi, **prev;
1782  uint32_t pktlen;
1783  uint16_t nseg;
1784 
1785  if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
1786  return NULL;
1787 
1788  mi = mc;
1789  prev = &mi->next;
1790  pktlen = md->pkt_len;
1791  nseg = 0;
1792 
1793  do {
1794  nseg++;
1795  rte_pktmbuf_attach(mi, md);
1796  *prev = mi;
1797  prev = &mi->next;
1798  } while ((md = md->next) != NULL &&
1799  (mi = rte_pktmbuf_alloc(mp)) != NULL);
1800 
1801  *prev = NULL;
1802  mc->nb_segs = nseg;
1803  mc->pkt_len = pktlen;
1804 
1805  /* Allocation of new indirect segment failed */
1806  if (unlikely (mi == NULL)) {
1807  rte_pktmbuf_free(mc);
1808  return NULL;
1809  }
1810 
1811  __rte_mbuf_sanity_check(mc, 1);
1812  return mc;
1813 }
1814 
1826 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1827 {
1829 
1830  do {
1831  rte_mbuf_refcnt_update(m, v);
1832  } while ((m = m->next) != NULL);
1833 }
1834 
1843 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
1844 {
1846  return m->data_off;
1847 }
1848 
1857 static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
1858 {
1860  return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
1861  m->data_len);
1862 }
1863 
1872 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
1873 {
1875  while (m->next != NULL)
1876  m = m->next;
1877  return m;
1878 }
1879 
1894 #define rte_pktmbuf_mtod_offset(m, t, o) \
1895  ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
1896 
1909 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
1910 
1920 #define rte_pktmbuf_iova_offset(m, o) \
1921  (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
1922 
1923 /* deprecated */
1924 #define rte_pktmbuf_mtophys_offset(m, o) \
1925  rte_pktmbuf_iova_offset(m, o)
1926 
1934 #define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
1935 
1936 /* deprecated */
1937 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
1938 
1947 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1948 
1957 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1958 
1974 static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
1975  uint16_t len)
1976 {
1978 
1979  if (unlikely(len > rte_pktmbuf_headroom(m)))
1980  return NULL;
1981 
1982  /* NB: elaborating the subtraction like this instead of using
1983  * -= allows us to ensure the result type is uint16_t
1984  * avoiding compiler warnings on gcc 8.1 at least */
1985  m->data_off = (uint16_t)(m->data_off - len);
1986  m->data_len = (uint16_t)(m->data_len + len);
1987  m->pkt_len = (m->pkt_len + len);
1988 
1989  return (char *)m->buf_addr + m->data_off;
1990 }
1991 
2007 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
2008 {
2009  void *tail;
2010  struct rte_mbuf *m_last;
2011 
2013 
2014  m_last = rte_pktmbuf_lastseg(m);
2015  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
2016  return NULL;
2017 
2018  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
2019  m_last->data_len = (uint16_t)(m_last->data_len + len);
2020  m->pkt_len = (m->pkt_len + len);
2021  return (char*) tail;
2022 }
2023 
2038 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
2039 {
2041 
2042  if (unlikely(len > m->data_len))
2043  return NULL;
2044 
2045  /* NB: elaborating the addition like this instead of using
2046  * += allows us to ensure the result type is uint16_t
2047  * avoiding compiler warnings on gcc 8.1 at least */
2048  m->data_len = (uint16_t)(m->data_len - len);
2049  m->data_off = (uint16_t)(m->data_off + len);
2050  m->pkt_len = (m->pkt_len - len);
2051  return (char *)m->buf_addr + m->data_off;
2052 }
2053 
2068 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
2069 {
2070  struct rte_mbuf *m_last;
2071 
2073 
2074  m_last = rte_pktmbuf_lastseg(m);
2075  if (unlikely(len > m_last->data_len))
2076  return -1;
2077 
2078  m_last->data_len = (uint16_t)(m_last->data_len - len);
2079  m->pkt_len = (m->pkt_len - len);
2080  return 0;
2081 }
2082 
2092 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
2093 {
2095  return m->nb_segs == 1;
2096 }
2097 
2101 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
2102  uint32_t len, void *buf);
2103 
2124 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
2125  uint32_t off, uint32_t len, void *buf)
2126 {
2127  if (likely(off + len <= rte_pktmbuf_data_len(m)))
2128  return rte_pktmbuf_mtod_offset(m, char *, off);
2129  else
2130  return __rte_pktmbuf_read(m, off, len, buf);
2131 }
2132 
2149 static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
2150 {
2151  struct rte_mbuf *cur_tail;
2152 
2153  /* Check for number-of-segments-overflow */
2154  if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
2155  return -EOVERFLOW;
2156 
2157  /* Chain 'tail' onto the old tail */
2158  cur_tail = rte_pktmbuf_lastseg(head);
2159  cur_tail->next = tail;
2160 
2161  /* accumulate number of segments and total length.
2162  * NB: elaborating the addition like this instead of using
2163  * -= allows us to ensure the result type is uint16_t
2164  * avoiding compiler warnings on gcc 8.1 at least */
2165  head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
2166  head->pkt_len += tail->pkt_len;
2167 
2168  /* pkt_len is only set in the head */
2169  tail->pkt_len = tail->data_len;
2170 
2171  return 0;
2172 }
2173 
2184 static inline int
2186 {
2187  uint64_t ol_flags = m->ol_flags;
2188  uint64_t inner_l3_offset = m->l2_len;
2189 
2190  /* Does packet set any of available offloads? */
2191  if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
2192  return 0;
2193 
2194  if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2195  /* NB: elaborating the addition like this instead of using
2196  * += gives the result uint64_t type instead of int,
2197  * avoiding compiler warnings on gcc 8.1 at least */
2198  inner_l3_offset = inner_l3_offset + m->outer_l2_len +
2199  m->outer_l3_len;
2200 
2201  /* Headers are fragmented */
2202  if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
2203  return -ENOTSUP;
2204 
2205  /* IP checksum can be counted only for IPv4 packet */
2206  if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
2207  return -EINVAL;
2208 
2209  /* IP type not set when required */
2210  if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
2211  if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
2212  return -EINVAL;
2213 
2214  /* Check requirements for TSO packet */
2215  if (ol_flags & PKT_TX_TCP_SEG)
2216  if ((m->tso_segsz == 0) ||
2217  ((ol_flags & PKT_TX_IPV4) &&
2218  !(ol_flags & PKT_TX_IP_CKSUM)))
2219  return -EINVAL;
2220 
2221  /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
2222  if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2223  !(ol_flags & PKT_TX_OUTER_IPV4))
2224  return -EINVAL;
2225 
2226  return 0;
2227 }
2228 
2241 static inline int
2243 {
2244  size_t seg_len, copy_len;
2245  struct rte_mbuf *m;
2246  struct rte_mbuf *m_next;
2247  char *buffer;
2248 
2249  if (rte_pktmbuf_is_contiguous(mbuf))
2250  return 0;
2251 
2252  /* Extend first segment to the total packet length */
2253  copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
2254 
2255  if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
2256  return -1;
2257 
2258  buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
2259  mbuf->data_len = (uint16_t)(mbuf->pkt_len);
2260 
2261  /* Append data from next segments to the first one */
2262  m = mbuf->next;
2263  while (m != NULL) {
2264  m_next = m->next;
2265 
2266  seg_len = rte_pktmbuf_data_len(m);
2267  rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
2268  buffer += seg_len;
2269 
2271  m = m_next;
2272  }
2273 
2274  mbuf->next = NULL;
2275  mbuf->nb_segs = 1;
2276 
2277  return 0;
2278 }
2279 
2294 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
2295 
2296 #ifdef __cplusplus
2297 }
2298 #endif
2299 
2300 #endif /* _RTE_MBUF_H_ */
struct rte_mbuf_ext_shared_info * shinfo
Definition: rte_mbuf.h:662
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:732
struct rte_mbuf * next
Definition: rte_mbuf.h:625
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:855
uint64_t timestamp
Definition: rte_mbuf.h:613
uint16_t vlan_tci_outer
Definition: rte_mbuf.h:606
#define __rte_always_inline
Definition: rte_common.h:146
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
Definition: rte_atomic.h:256
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1328
uint8_t inner_esp_next_proto
Definition: rte_mbuf.h:541
__extension__ typedef void * MARKER[0]
Definition: rte_mbuf.h:468
#define RTE_MBUF_DIRECT(mb)
Definition: rte_mbuf.h:845
#define IND_ATTACHED_MBUF
Definition: rte_mbuf.h:400
rte_iova_t buf_physaddr
Definition: rte_mbuf.h:491
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1265
static int rte_validate_tx_offload(const struct rte_mbuf *m)
Definition: rte_mbuf.h:2185
#define likely(x)
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1747
#define RTE_PTR_ALIGN_FLOOR(ptr, align)
Definition: rte_common.h:187
uint64_t l2_len
Definition: rte_mbuf.h:633
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
Definition: rte_mbuf.h:1778
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1731
void * buf_addr
Definition: rte_mbuf.h:481
uint32_t l2_type
Definition: rte_mbuf.h:535
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:778
uint16_t data_len
Definition: rte_mbuf.h:559
uint32_t lo
Definition: rte_mbuf.h:573
rte_mbuf_extbuf_free_callback_t free_cb
Definition: rte_mbuf.h:675
void * userdata
Definition: rte_mbuf.h:620
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:2149
uint8_t inner_l2_type
Definition: rte_mbuf.h:548
uint64_t tso_segsz
Definition: rte_mbuf.h:639
__extension__ typedef uint8_t MARKER8[0]
Definition: rte_mbuf.h:470
uint64_t l4_len
Definition: rte_mbuf.h:638
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1843
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:1350
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:1281
uint32_t cache_size
Definition: rte_mempool.h:230
#define PKT_TX_OUTER_IP_CKSUM
Definition: rte_mbuf.h:356
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:711
#define PKT_TX_IPV6
Definition: rte_mbuf.h:339
static uint16_t rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value)
Definition: rte_mbuf.h:1014
uint16_t nb_segs
Definition: rte_mbuf.h:512
uint16_t port
Definition: rte_mbuf.h:517
uint64_t outer_l3_len
Definition: rte_mbuf.h:642
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1689
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:2092
uint64_t l3_len
Definition: rte_mbuf.h:637
uint32_t l4_type
Definition: rte_mbuf.h:537
#define RTE_PTR_ADD(ptr, x)
Definition: rte_common.h:158
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
Definition: rte_mbuf.h:363
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1857
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1099
#define PKT_TX_TCP_SEG
Definition: rte_mbuf.h:299
#define unlikely(x)
uint16_t priv_size
Definition: rte_mbuf.h:651
uint16_t timesync
Definition: rte_mbuf.h:654
uint32_t hi
Definition: rte_mbuf.h:576
__extension__ typedef uint64_t MARKER64[0]
Definition: rte_mbuf.h:472
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define RTE_MIN(a, b)
Definition: rte_common.h:414
#define PKT_TX_IPV4
Definition: rte_mbuf.h:331
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:867
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:957
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
Definition: rte_mbuf.h:2242
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1483
RTE_STD_C11 union rte_mbuf::@173 __rte_aligned
uint64_t outer_l2_len
Definition: rte_mbuf.h:643
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
Definition: rte_atomic.h:270
uint16_t refcnt
Definition: rte_mbuf.h:510
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:2038
#define rte_pktmbuf_pkt_len(m)
Definition: rte_mbuf.h:1947
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1559
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1455
uint32_t tun_type
Definition: rte_mbuf.h:538
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
Definition: rte_atomic.h:351
uint64_t ol_flags
Definition: rte_mbuf.h:519
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1650
static uint16_t rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:982
uint32_t pkt_len
Definition: rte_mbuf.h:558
#define PKT_TX_L4_MASK
Definition: rte_mbuf.h:315
uint16_t buf_len
Definition: rte_mbuf.h:608
uint32_t inner_l4_type
Definition: rte_mbuf.h:554
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1957
#define rte_pktmbuf_mtod(m, t)
Definition: rte_mbuf.h:1909
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:948
uint32_t packet_type
Definition: rte_mbuf.h:533
#define MBUF_INVALID_PORT
Definition: rte_mbuf.h:1295
uint32_t seqn
Definition: rte_mbuf.h:657
#define EXT_ATTACHED_MBUF
Definition: rte_mbuf.h:398
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1244
uint8_t inner_l3_type
Definition: rte_mbuf.h:550
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
Definition: rte_mbuf.h:1425
#define RTE_MBUF_HAS_EXTBUF(mb)
Definition: rte_mbuf.h:837
#define RTE_STD_C11
Definition: rte_common.h:37
#define PKT_TX_IP_CKSUM
Definition: rte_mbuf.h:323
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
Definition: rte_mbuf.h:624
static void rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value)
Definition: rte_mbuf.h:996
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:2007
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:966
void(* rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque)
Definition: rte_mbuf.h:669
struct rte_mempool * rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:757
uint32_t tx_metadata
Definition: rte_mbuf.h:600
uint32_t rss
Definition: rte_mbuf.h:566
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:2068
uint64_t rte_iova_t
Definition: rte_memory.h:82
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:792
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:2124
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1974
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1074
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1826
uint64_t phys_addr_t
Definition: rte_memory.h:73
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:163
#define __rte_cache_aligned
Definition: rte_memory.h:66
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1872
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
Definition: rte_mbuf.h:376
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1609
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1324
uint64_t udata64
Definition: rte_mbuf.h:621
uint32_t l3_type
Definition: rte_mbuf.h:536
#define RTE_PTR_DIFF(ptr1, ptr2)
Definition: rte_common.h:170
struct rte_mbuf::@176::@188::@191 fdir
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:694
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:509
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1637
uint64_t tx_offload
Definition: rte_mbuf.h:630
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:219
uint16_t vlan_tci
Definition: rte_mbuf.h:561
static void *__rte_experimental rte_mbuf_to_priv(struct rte_mbuf *m)
Definition: rte_mbuf.h:812
#define RTE_MBUF_INDIRECT(mb)
Definition: rte_mbuf.h:830
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:677
static void __rte_experimental rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:1510
struct rte_mbuf::@176::@188::@192 sched
#define RTE_SET_USED(x)
Definition: rte_common.h:87
#define rte_pktmbuf_mtod_offset(m, t, o)
Definition: rte_mbuf.h:1894
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)