DPDK 19.08.2
rte_mbuf.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5 
6 #ifndef _RTE_MBUF_H_
7 #define _RTE_MBUF_H_
8 
34 #include <stdint.h>
35 #include <rte_compat.h>
36 #include <rte_common.h>
37 #include <rte_config.h>
38 #include <rte_mempool.h>
39 #include <rte_memory.h>
40 #include <rte_atomic.h>
41 #include <rte_prefetch.h>
42 #include <rte_branch_prediction.h>
43 #include <rte_byteorder.h>
44 #include <rte_mbuf_ptype.h>
45 
46 #ifdef __cplusplus
47 extern "C" {
48 #endif
49 
50 /*
51  * Packet Offload Features Flags. They also carry packet type information.
52  * These are critical resources; both RX and TX share these bits, so be cautious about any change.
53  *
54  * - RX flags start at bit position zero, and get added to the left of previous
55  * flags.
56  * - The most-significant 3 bits are reserved for generic mbuf flags
57  * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
58  * added to the right of the previously defined flags i.e. they should count
59  * downwards, not upwards.
60  *
61  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
62  * rte_get_tx_ol_flag_name().
63  */
64 
72 #define PKT_RX_VLAN (1ULL << 0)
73 
74 #define PKT_RX_RSS_HASH (1ULL << 1)
75 #define PKT_RX_FDIR (1ULL << 2)
84 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
85 
93 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
94 
95 #define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
103 #define PKT_RX_VLAN_STRIPPED (1ULL << 6)
104 
113 #define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
114 
115 #define PKT_RX_IP_CKSUM_UNKNOWN 0
116 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
117 #define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
118 #define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
119 
128 #define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
129 
130 #define PKT_RX_L4_CKSUM_UNKNOWN 0
131 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
132 #define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
133 #define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
134 
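/*
 * Usage sketch: classifying the L4 checksum status of a received packet
 * from its ol_flags value. The example_* name is illustrative, not part
 * of the DPDK API.
 */
static inline int
example_l4_cksum_ok(uint64_t ol_flags)
{
 switch (ol_flags & PKT_RX_L4_CKSUM_MASK) {
 case PKT_RX_L4_CKSUM_GOOD:
  return 1; /* verified by hardware */
 case PKT_RX_L4_CKSUM_BAD:
  return 0; /* drop or count as an error */
 default: /* UNKNOWN or NONE */
  return -1; /* verify in software if needed */
 }
}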
135 #define PKT_RX_IEEE1588_PTP (1ULL << 9)
136 #define PKT_RX_IEEE1588_TMST (1ULL << 10)
137 #define PKT_RX_FDIR_ID (1ULL << 13)
138 #define PKT_RX_FDIR_FLX (1ULL << 14)
148 #define PKT_RX_QINQ_STRIPPED (1ULL << 15)
149 
155 #define PKT_RX_LRO (1ULL << 16)
156 
160 #define PKT_RX_TIMESTAMP (1ULL << 17)
161 
165 #define PKT_RX_SEC_OFFLOAD (1ULL << 18)
166 
170 #define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
171 
180 #define PKT_RX_QINQ (1ULL << 20)
181 
194 #define PKT_RX_OUTER_L4_CKSUM_MASK ((1ULL << 21) | (1ULL << 22))
195 
196 #define PKT_RX_OUTER_L4_CKSUM_UNKNOWN 0
197 #define PKT_RX_OUTER_L4_CKSUM_BAD (1ULL << 21)
198 #define PKT_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)
199 #define PKT_RX_OUTER_L4_CKSUM_INVALID ((1ULL << 21) | (1ULL << 22))
200 
201 /* add new RX flags here */
202 
203 /* add new TX flags here */
204 
208 #define PKT_TX_METADATA (1ULL << 40)
209 
219 #define PKT_TX_OUTER_UDP_CKSUM (1ULL << 41)
220 
226 #define PKT_TX_UDP_SEG (1ULL << 42)
227 
231 #define PKT_TX_SEC_OFFLOAD (1ULL << 43)
232 
237 #define PKT_TX_MACSEC (1ULL << 44)
238 
247 #define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
248 #define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
249 #define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
250 #define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
251 
252 #define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
253 #define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
254 
265 #define PKT_TX_TUNNEL_IP (0xDULL << 45)
266 
278 #define PKT_TX_TUNNEL_UDP (0xEULL << 45)
279 /* add new TX TUNNEL type here */
280 #define PKT_TX_TUNNEL_MASK (0xFULL << 45)
281 
287 #define PKT_TX_QINQ (1ULL << 49)
288 /* this old name is deprecated */
289 #define PKT_TX_QINQ_PKT PKT_TX_QINQ
290 
300 #define PKT_TX_TCP_SEG (1ULL << 50)
301 
302 #define PKT_TX_IEEE1588_TMST (1ULL << 51)
312 #define PKT_TX_L4_NO_CKSUM (0ULL << 52)
313 #define PKT_TX_TCP_CKSUM (1ULL << 52)
314 #define PKT_TX_SCTP_CKSUM (2ULL << 52)
315 #define PKT_TX_UDP_CKSUM (3ULL << 52)
316 #define PKT_TX_L4_MASK (3ULL << 52)
324 #define PKT_TX_IP_CKSUM (1ULL << 54)
325 
332 #define PKT_TX_IPV4 (1ULL << 55)
333 
340 #define PKT_TX_IPV6 (1ULL << 56)
341 
347 #define PKT_TX_VLAN (1ULL << 57)
348 /* this old name is deprecated */
349 #define PKT_TX_VLAN_PKT PKT_TX_VLAN
350 
357 #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
358 
364 #define PKT_TX_OUTER_IPV4 (1ULL << 59)
365 
371 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
372 
377 #define PKT_TX_OFFLOAD_MASK ( \
378  PKT_TX_OUTER_IPV6 | \
379  PKT_TX_OUTER_IPV4 | \
380  PKT_TX_OUTER_IP_CKSUM | \
381  PKT_TX_VLAN_PKT | \
382  PKT_TX_IPV6 | \
383  PKT_TX_IPV4 | \
384  PKT_TX_IP_CKSUM | \
385  PKT_TX_L4_MASK | \
386  PKT_TX_IEEE1588_TMST | \
387  PKT_TX_TCP_SEG | \
388  PKT_TX_QINQ_PKT | \
389  PKT_TX_TUNNEL_MASK | \
390  PKT_TX_MACSEC | \
391  PKT_TX_SEC_OFFLOAD | \
392  PKT_TX_UDP_SEG | \
393  PKT_TX_OUTER_UDP_CKSUM | \
394  PKT_TX_METADATA)
395 
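/*
 * Usage sketch: the flag combination an application ORs into ol_flags to
 * request IP and TCP checksum offload on an IPv4 packet; m->l2_len and
 * m->l3_len must be filled as well (see the tx_offload field below). The
 * example_* name is illustrative.
 */
static inline void
example_request_ipv4_tcp_cksum(uint64_t *ol_flags)
{
 *ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}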
399 #define EXT_ATTACHED_MBUF (1ULL << 61)
400 
401 #define IND_ATTACHED_MBUF (1ULL << 62)
404 #define RTE_MBUF_PRIV_ALIGN 8
405 
414 const char *rte_get_rx_ol_flag_name(uint64_t mask);
415 
428 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
429 
440 const char *rte_get_tx_ol_flag_name(uint64_t mask);
441 
454 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
455 
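/*
 * Usage sketch: formatting a packet's RX offload flags for a debug log.
 * Assumes <stdio.h>; the buffer size and example_* name are illustrative.
 */
static inline void
example_log_rx_flags(uint64_t ol_flags)
{
 char buf[256];

 if (rte_get_rx_ol_flag_list(ol_flags, buf, sizeof(buf)) == 0)
  printf("rx ol_flags: %s\n", buf);
}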
462 #define RTE_MBUF_DEFAULT_DATAROOM 2048
463 #define RTE_MBUF_DEFAULT_BUF_SIZE \
464  (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
465 
466 /* Define a set of marker types that can be used to refer to fixed points
467  * in the mbuf. */
468 __extension__
469 typedef void *MARKER[0];
470 __extension__
471 typedef uint8_t MARKER8[0];
472 __extension__
473 typedef uint64_t MARKER64[0];
476 struct rte_mbuf_sched {
477  uint32_t queue_id;
478  uint8_t traffic_class;
482  uint8_t color;
484  uint16_t reserved;
485 };
491 enum {
492  RTE_MBUF_L2_LEN_BITS = 7,
493  RTE_MBUF_L3_LEN_BITS = 9,
494  RTE_MBUF_L4_LEN_BITS = 8,
495  RTE_MBUF_TSO_SEGSZ_BITS = 16,
496  RTE_MBUF_OUTL3_LEN_BITS = 9,
497  RTE_MBUF_OUTL2_LEN_BITS = 7,
498  RTE_MBUF_TXOFLD_UNUSED_BITS = sizeof(uint64_t) * CHAR_BIT -
499  RTE_MBUF_L2_LEN_BITS -
500  RTE_MBUF_L3_LEN_BITS -
501  RTE_MBUF_L4_LEN_BITS -
502  RTE_MBUF_TSO_SEGSZ_BITS -
503  RTE_MBUF_OUTL3_LEN_BITS -
504  RTE_MBUF_OUTL2_LEN_BITS,
505 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
506  RTE_MBUF_L2_LEN_OFS =
507  sizeof(uint64_t) * CHAR_BIT - RTE_MBUF_L2_LEN_BITS,
508  RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS - RTE_MBUF_L3_LEN_BITS,
509  RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS - RTE_MBUF_L4_LEN_BITS,
510  RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS - RTE_MBUF_TSO_SEGSZ_BITS,
511  RTE_MBUF_OUTL3_LEN_OFS =
512  RTE_MBUF_TSO_SEGSZ_OFS - RTE_MBUF_OUTL3_LEN_BITS,
513  RTE_MBUF_OUTL2_LEN_OFS =
514  RTE_MBUF_OUTL3_LEN_OFS - RTE_MBUF_OUTL2_LEN_BITS,
515  RTE_MBUF_TXOFLD_UNUSED_OFS =
516  RTE_MBUF_OUTL2_LEN_OFS - RTE_MBUF_TXOFLD_UNUSED_BITS,
517 #else
518  RTE_MBUF_L2_LEN_OFS = 0,
519  RTE_MBUF_L3_LEN_OFS = RTE_MBUF_L2_LEN_OFS + RTE_MBUF_L2_LEN_BITS,
520  RTE_MBUF_L4_LEN_OFS = RTE_MBUF_L3_LEN_OFS + RTE_MBUF_L3_LEN_BITS,
521  RTE_MBUF_TSO_SEGSZ_OFS = RTE_MBUF_L4_LEN_OFS + RTE_MBUF_L4_LEN_BITS,
522  RTE_MBUF_OUTL3_LEN_OFS =
523  RTE_MBUF_TSO_SEGSZ_OFS + RTE_MBUF_TSO_SEGSZ_BITS,
524  RTE_MBUF_OUTL2_LEN_OFS =
525  RTE_MBUF_OUTL3_LEN_OFS + RTE_MBUF_OUTL3_LEN_BITS,
526  RTE_MBUF_TXOFLD_UNUSED_OFS =
527  RTE_MBUF_OUTL2_LEN_OFS + RTE_MBUF_OUTL2_LEN_BITS,
528 #endif
529 };
530 
534 struct rte_mbuf {
535  MARKER cacheline0;
536 
537  void *buf_addr;
545  union {
546  rte_iova_t buf_iova;
547  rte_iova_t buf_physaddr; /* deprecated */
548  } __rte_aligned(sizeof(rte_iova_t));
549 
550  /* next 8 bytes are initialised on RX descriptor rearm */
551  MARKER64 rearm_data;
552  uint16_t data_off;
553 
564  union {
565  rte_atomic16_t refcnt_atomic;
566  uint16_t refcnt;
567  };
568  uint16_t nb_segs;
573  uint16_t port;
574 
575  uint64_t ol_flags;
577  /* remaining bytes are set on RX when pulling packet from descriptor */
578  MARKER rx_descriptor_fields1;
579 
580  /*
581  * The packet type, which is the combination of outer/inner L2, L3, L4
582  * and tunnel types. The packet_type describes the data actually present
583  * in the mbuf. Example: if VLAN stripping is enabled, a received VLAN
584  * packet would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN,
585  * because the VLAN tag is stripped from the data.
586  */
588  union {
589  uint32_t packet_type;
590  struct {
591  uint32_t l2_type:4;
592  uint32_t l3_type:4;
593  uint32_t l4_type:4;
594  uint32_t tun_type:4;
596  union {
597  uint8_t inner_esp_next_proto;
602  __extension__
603  struct {
604  uint8_t inner_l2_type:4;
606  uint8_t inner_l3_type:4;
608  };
609  };
610  uint32_t inner_l4_type:4;
611  };
612  };
613 
614  uint32_t pkt_len;
615  uint16_t data_len;
617  uint16_t vlan_tci;
618 
620  union {
621  union {
622  uint32_t rss;
623  struct {
624  union {
625  struct {
626  uint16_t hash;
627  uint16_t id;
628  };
629  uint32_t lo;
631  };
632  uint32_t hi;
636  } fdir;
639  struct {
640  uint32_t reserved1;
641  uint16_t reserved2;
642  uint16_t txq;
647  } txadapter;
649  uint32_t usr;
650  } hash;
651  struct {
659  uint32_t tx_metadata;
660  uint32_t reserved;
661  };
662  };
663 
665  uint16_t vlan_tci_outer;
666 
667  uint16_t buf_len;
674  uint64_t timestamp;
675 
676  /* second cache line - fields only used in slow path or on TX */
677  MARKER cacheline1 __rte_cache_min_aligned;
678 
680  union {
681  void *userdata;
682  uint64_t udata64;
683  };
684 
685  struct rte_mempool *pool;
686  struct rte_mbuf *next;
688  /* fields to support TX offloads */
690  union {
691  uint64_t tx_offload;
692  __extension__
693  struct {
694  uint64_t l2_len:RTE_MBUF_L2_LEN_BITS;
698  uint64_t l3_len:RTE_MBUF_L3_LEN_BITS;
700  uint64_t l4_len:RTE_MBUF_L4_LEN_BITS;
702  uint64_t tso_segsz:RTE_MBUF_TSO_SEGSZ_BITS;
705  /*
706  * Fields for Tx offloading of tunnels.
707  * These are undefined for packets which don't request
708  * any tunnel offloads (outer IP or UDP checksum,
709  * tunnel TSO).
710  *
711  * PMDs should not use these fields unconditionally
712  * when calculating offsets.
713  *
714  * Applications are expected to set appropriate tunnel
715  * offload flags when they fill in these fields.
716  */
717  uint64_t outer_l3_len:RTE_MBUF_OUTL3_LEN_BITS;
719  uint64_t outer_l2_len:RTE_MBUF_OUTL2_LEN_BITS;
722  /* uint64_t unused:RTE_MBUF_TXOFLD_UNUSED_BITS; */
723  };
724  };
725 
728  uint16_t priv_size;
729 
731  uint16_t timesync;
732 
734  uint32_t seqn;
735 
739  struct rte_mbuf_ext_shared_info *shinfo;
740 
741 } __rte_cache_aligned;
742 
746 typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);
747 
751 struct rte_mbuf_ext_shared_info {
752  rte_mbuf_extbuf_free_callback_t free_cb;
753  void *fcb_opaque;
754  rte_atomic16_t refcnt_atomic;
755 };
756 
758 #define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
759 
770 static inline void
771 rte_mbuf_prefetch_part1(struct rte_mbuf *m)
772 {
773  rte_prefetch0(&m->cacheline0);
774 }
775 
787 static inline void
788 rte_mbuf_prefetch_part2(struct rte_mbuf *m)
789 {
790 #if RTE_CACHE_LINE_SIZE == 64
791  rte_prefetch0(&m->cacheline1);
792 #else
793  RTE_SET_USED(m);
794 #endif
795 }
796 
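/*
 * Usage sketch: prefetching the next mbuf while processing the current
 * one in an RX loop; the example_* names are illustrative.
 */
static inline void
example_process_burst(struct rte_mbuf **pkts, uint16_t nb_rx)
{
 uint16_t i;

 for (i = 0; i < nb_rx; i++) {
  if (i + 1 < nb_rx)
   rte_mbuf_prefetch_part1(pkts[i + 1]);
  /* ... read pkts[i]->ol_flags, data_len, etc. here ... */
 }
}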
797 
798 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
799 
808 static inline rte_iova_t
809 rte_mbuf_data_iova(const struct rte_mbuf *mb)
810 {
811  return mb->buf_iova + mb->data_off;
812 }
813 
814 __rte_deprecated
815 static inline phys_addr_t
816 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
817 {
818  return rte_mbuf_data_iova(mb);
819 }
820 
833 static inline rte_iova_t
834 rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
835 {
836  return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
837 }
838 
839 __rte_deprecated
840 static inline phys_addr_t
841 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
842 {
843  return rte_mbuf_data_iova_default(mb);
844 }
845 
854 static inline struct rte_mbuf *
855 rte_mbuf_from_indirect(struct rte_mbuf *mi)
856 {
857  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
858 }
859 
880 __rte_experimental
881 static inline char *
882 rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
883 {
884  return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
885 }
886 
898 __rte_experimental
899 static inline char *
900 rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
901 {
902  /* gcc complains about calling this experimental function even
903  * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
904  */
905 #ifdef ALLOW_EXPERIMENTAL_API
906  return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
907 #else
908  return NULL;
909 #endif
910 }
911 
925 static inline char *
926 rte_mbuf_to_baddr(struct rte_mbuf *md)
927 {
928 #ifdef ALLOW_EXPERIMENTAL_API
929  return rte_mbuf_buf_addr(md, md->pool);
930 #else
931  char *buffer_addr;
932  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
933  return buffer_addr;
934 #endif
935 }
936 
949 __rte_experimental
950 static inline void *
951 rte_mbuf_to_priv(struct rte_mbuf *m)
952 {
953  return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
954 }
955 
963 #define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
964 
970 #define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
971 
978 #define RTE_MBUF_DIRECT(mb) \
979  (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
980 
987 struct rte_pktmbuf_pool_private {
988  uint16_t mbuf_data_room_size;
989  uint16_t mbuf_priv_size;
990 };
991 
992 #ifdef RTE_LIBRTE_MBUF_DEBUG
993 
995 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
996 
997 #else /* RTE_LIBRTE_MBUF_DEBUG */
998 
1000 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
1001 
1002 #endif /* RTE_LIBRTE_MBUF_DEBUG */
1003 
1004 #ifdef RTE_MBUF_REFCNT_ATOMIC
1005 
1013 static inline uint16_t
1014 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
1015 {
1016  return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
1017 }
1018 
1026 static inline void
1027 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
1028 {
1029  rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
1030 }
1031 
1032 /* internal */
1033 static inline uint16_t
1034 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1035 {
1036  return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
1037 }
1038 
1048 static inline uint16_t
1049 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1050 {
1051  /*
1052  * The atomic_add is an expensive operation, so we don't want to
1053  * call it in the case where we know we are the unique holder of
1054  * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
1055  * operation has to be used because concurrent accesses on the
1056  * reference counter can occur.
1057  */
1058  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1059  ++value;
1060  rte_mbuf_refcnt_set(m, (uint16_t)value);
1061  return (uint16_t)value;
1062  }
1063 
1064  return __rte_mbuf_refcnt_update(m, value);
1065 }
1066 
1067 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
1068 
1069 /* internal */
1070 static inline uint16_t
1071 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1072 {
1073  m->refcnt = (uint16_t)(m->refcnt + value);
1074  return m->refcnt;
1075 }
1076 
1080 static inline uint16_t
1081 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
1082 {
1083  return __rte_mbuf_refcnt_update(m, value);
1084 }
1085 
1089 static inline uint16_t
1090 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
1091 {
1092  return m->refcnt;
1093 }
1094 
1098 static inline void
1099 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
1100 {
1101  m->refcnt = new_value;
1102 }
1103 
1104 #endif /* RTE_MBUF_REFCNT_ATOMIC */
1105 
1114 static inline uint16_t
1115 rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
1116 {
1117  return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
1118 }
1119 
1128 static inline void
1129 rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
1130  uint16_t new_value)
1131 {
1132  rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
1133 }
1134 
1146 static inline uint16_t
1147 rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
1148  int16_t value)
1149 {
1150  if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
1151  ++value;
1152  rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
1153  return (uint16_t)value;
1154  }
1155 
1156  return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
1157 }
1158 
1160 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
1161  if ((m) != NULL) \
1162  rte_prefetch0(m); \
1163 } while (0)
1164 
1165 
1178 void
1179 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
1180 
1200 __rte_experimental
1201 int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
1202  const char **reason);
1203 
1204 #define MBUF_RAW_ALLOC_CHECK(m) do { \
1205  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
1206  RTE_ASSERT((m)->next == NULL); \
1207  RTE_ASSERT((m)->nb_segs == 1); \
1208  __rte_mbuf_sanity_check(m, 0); \
1209 } while (0)
1210 
1230 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
1231 {
1232  struct rte_mbuf *m;
1233 
1234  if (rte_mempool_get(mp, (void **)&m) < 0)
1235  return NULL;
1236  MBUF_RAW_ALLOC_CHECK(m);
1237  return m;
1238 }
1239 
1254 static __rte_always_inline void
1255 rte_mbuf_raw_free(struct rte_mbuf *m)
1256 {
1257  RTE_ASSERT(RTE_MBUF_DIRECT(m));
1258  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
1259  RTE_ASSERT(m->next == NULL);
1260  RTE_ASSERT(m->nb_segs == 1);
1261  __rte_mbuf_sanity_check(m, 0);
1262  rte_mempool_put(m->pool, m);
1263 }
1264 
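/*
 * Usage sketch: the raw alloc/free pair as a driver fast path might use
 * it. Unlike rte_pktmbuf_alloc(), the returned mbuf is not reset. The
 * example_* name is illustrative.
 */
static inline int
example_raw_cycle(struct rte_mempool *mp)
{
 struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

 if (m == NULL)
  return -1;
 /* ... initialize only the fields the consumer relies on ... */
 rte_mbuf_raw_free(m);
 return 0;
}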
1284 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
1285  void *m, unsigned i);
1286 
1287 
1305 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
1306 
1341 struct rte_mempool *
1342 rte_pktmbuf_pool_create(const char *name, unsigned n,
1343  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
1344  int socket_id);
1345 
1383 struct rte_mempool *
1384 rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
1385  unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
1386  int socket_id, const char *ops_name);
1387 
1399 static inline uint16_t
1400 rte_pktmbuf_data_room_size(struct rte_mempool *mp)
1401 {
1402  struct rte_pktmbuf_pool_private *mbp_priv;
1403 
1404  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1405  return mbp_priv->mbuf_data_room_size;
1406 }
1407 
1420 static inline uint16_t
1421 rte_pktmbuf_priv_size(struct rte_mempool *mp)
1422 {
1423  struct rte_pktmbuf_pool_private *mbp_priv;
1424 
1425  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1426  return mbp_priv->mbuf_priv_size;
1427 }
1428 
1437 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
1438 {
1439  m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
1440  (uint16_t)m->buf_len);
1441 }
1442 
1451 #define MBUF_INVALID_PORT UINT16_MAX
1452 
1453 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1454 {
1455  m->next = NULL;
1456  m->pkt_len = 0;
1457  m->tx_offload = 0;
1458  m->vlan_tci = 0;
1459  m->vlan_tci_outer = 0;
1460  m->nb_segs = 1;
1461  m->port = MBUF_INVALID_PORT;
1462 
1463  m->ol_flags = 0;
1464  m->packet_type = 0;
1465  rte_pktmbuf_reset_headroom(m);
1466 
1467  m->data_len = 0;
1468  __rte_mbuf_sanity_check(m, 1);
1469 }
1470 
1484 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1485 {
1486  struct rte_mbuf *m;
1487  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1488  rte_pktmbuf_reset(m);
1489  return m;
1490 }
1491 
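/*
 * Usage sketch: creating a pool and allocating one reset packet mbuf.
 * Assumes EAL is already initialized; the pool name, sizing and the
 * example_* name are illustrative.
 */
static inline struct rte_mbuf *
example_alloc_from_new_pool(void)
{
 struct rte_mempool *mp;

 mp = rte_pktmbuf_pool_create("example_pool", 8192, 256, 0,
  RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
 if (mp == NULL)
  return NULL;
 return rte_pktmbuf_alloc(mp);
}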
1506 static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
1507  struct rte_mbuf **mbufs, unsigned count)
1508 {
1509  unsigned idx = 0;
1510  int rc;
1511 
1512  rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
1513  if (unlikely(rc))
1514  return rc;
1515 
1516  /* To understand Duff's device and this loop-unrolling optimization,
1517  * see https://en.wikipedia.org/wiki/Duff's_device.
1518  * A while() loop is used here rather than a do {} while() loop to
1519  * avoid an extra check when count is zero.
1520  */
1521  switch (count % 4) {
1522  case 0:
1523  while (idx != count) {
1524  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1525  rte_pktmbuf_reset(mbufs[idx]);
1526  idx++;
1527  /* fall-through */
1528  case 3:
1529  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1530  rte_pktmbuf_reset(mbufs[idx]);
1531  idx++;
1532  /* fall-through */
1533  case 2:
1534  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1535  rte_pktmbuf_reset(mbufs[idx]);
1536  idx++;
1537  /* fall-through */
1538  case 1:
1539  MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
1540  rte_pktmbuf_reset(mbufs[idx]);
1541  idx++;
1542  /* fall-through */
1543  }
1544  }
1545  return 0;
1546 }
1547 
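/*
 * Usage sketch: allocating a burst of mbufs in one call; the burst size
 * and example_* name are illustrative. rte_pktmbuf_free() is defined
 * further below in this file.
 */
static inline void
example_alloc_burst(struct rte_mempool *mp)
{
 struct rte_mbuf *pkts[32];
 unsigned int i;

 if (rte_pktmbuf_alloc_bulk(mp, pkts, 32) != 0)
  return; /* all-or-nothing: pool could not supply 32 mbufs */
 for (i = 0; i < 32; i++)
  rte_pktmbuf_free(pkts[i]);
}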
1580 static inline struct rte_mbuf_ext_shared_info *
1581 rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
1582  rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
1583 {
1584  struct rte_mbuf_ext_shared_info *shinfo;
1585  void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
1586  void *addr;
1587 
1588  addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
1589  sizeof(uintptr_t));
1590  if (addr <= buf_addr)
1591  return NULL;
1592 
1593  shinfo = (struct rte_mbuf_ext_shared_info *)addr;
1594  shinfo->free_cb = free_cb;
1595  shinfo->fcb_opaque = fcb_opaque;
1596  rte_mbuf_ext_refcnt_set(shinfo, 1);
1597 
1598  *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
1599  return shinfo;
1600 }
1601 
1658 static inline void
1659 rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
1660  rte_iova_t buf_iova, uint16_t buf_len,
1661  struct rte_mbuf_ext_shared_info *shinfo)
1662 {
1663  /* mbuf should not be read-only */
1664  RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
1665  RTE_ASSERT(shinfo->free_cb != NULL);
1666 
1667  m->buf_addr = buf_addr;
1668  m->buf_iova = buf_iova;
1669  m->buf_len = buf_len;
1670 
1671  m->data_len = 0;
1672  m->data_off = 0;
1673 
1674  m->ol_flags |= EXT_ATTACHED_MBUF;
1675  m->shinfo = shinfo;
1676 }
1677 
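/*
 * Usage sketch: attaching an application-owned buffer to an mbuf. The
 * free callback, the buffer's origin and the example_* names are
 * illustrative; <stdlib.h> is assumed for free(), and buf_iova must be
 * known to the caller.
 */
static void
example_ext_free_cb(void *addr, void *opaque)
{
 RTE_SET_USED(opaque);
 free(addr); /* illustrative: buffer came from malloc() */
}

static inline int
example_attach_extbuf(struct rte_mbuf *m, void *buf, rte_iova_t buf_iova,
 uint16_t buf_len)
{
 struct rte_mbuf_ext_shared_info *shinfo;

 /* carve the shared info out of the tail of the buffer */
 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
  example_ext_free_cb, NULL);
 if (shinfo == NULL)
  return -1; /* buffer too small to hold the shared info */
 rte_pktmbuf_attach_extbuf(m, buf, buf_iova, buf_len, shinfo);
 return 0;
}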
1685 #define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
1686 
1708 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1709 {
1710  RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1711  rte_mbuf_refcnt_read(mi) == 1);
1712 
1713  if (RTE_MBUF_HAS_EXTBUF(m)) {
1714  rte_mbuf_ext_refcnt_update(m->shinfo, 1);
1715  mi->ol_flags = m->ol_flags;
1716  mi->shinfo = m->shinfo;
1717  } else {
1718  /* if m is not direct, get the mbuf that embeds the data */
1719  rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
1720  mi->priv_size = m->priv_size;
1721  mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1722  }
1723 
1724  mi->buf_iova = m->buf_iova;
1725  mi->buf_addr = m->buf_addr;
1726  mi->buf_len = m->buf_len;
1727 
1728  mi->data_off = m->data_off;
1729  mi->data_len = m->data_len;
1730  mi->port = m->port;
1731  mi->vlan_tci = m->vlan_tci;
1732  mi->vlan_tci_outer = m->vlan_tci_outer;
1733  mi->tx_offload = m->tx_offload;
1734  mi->hash = m->hash;
1735 
1736  mi->next = NULL;
1737  mi->pkt_len = mi->data_len;
1738  mi->nb_segs = 1;
1739  mi->packet_type = m->packet_type;
1740  mi->timestamp = m->timestamp;
1741 
1742  __rte_mbuf_sanity_check(mi, 1);
1744 }
1745 
1753 static inline void
1754 __rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
1755 {
1756  RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
1757  RTE_ASSERT(m->shinfo != NULL);
1758 
1759  if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
1760  m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
1761 }
1762 
1769 static inline void
1770 __rte_pktmbuf_free_direct(struct rte_mbuf *m)
1771 {
1772  struct rte_mbuf *md;
1773 
1774  RTE_ASSERT(RTE_MBUF_CLONED(m));
1775 
1776  md = rte_mbuf_from_indirect(m);
1777 
1778  if (rte_mbuf_refcnt_update(md, -1) == 0) {
1779  md->next = NULL;
1780  md->nb_segs = 1;
1781  rte_mbuf_refcnt_set(md, 1);
1782  rte_mbuf_raw_free(md);
1783  }
1784 }
1785 
1799 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1800 {
1801  struct rte_mempool *mp = m->pool;
1802  uint32_t mbuf_size, buf_len;
1803  uint16_t priv_size;
1804 
1805  if (RTE_MBUF_HAS_EXTBUF(m))
1806  __rte_pktmbuf_free_extbuf(m);
1807  else
1808  __rte_pktmbuf_free_direct(m);
1809 
1810  priv_size = rte_pktmbuf_priv_size(mp);
1811  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1812  buf_len = rte_pktmbuf_data_room_size(mp);
1813 
1814  m->priv_size = priv_size;
1815  m->buf_addr = (char *)m + mbuf_size;
1816  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1817  m->buf_len = (uint16_t)buf_len;
1818  rte_pktmbuf_reset_headroom(m);
1819  m->data_len = 0;
1820  m->ol_flags = 0;
1821 }
1822 
1837 static __rte_always_inline struct rte_mbuf *
1838 rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
1839 {
1840  __rte_mbuf_sanity_check(m, 0);
1841 
1842  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1843 
1844  if (!RTE_MBUF_DIRECT(m))
1845  rte_pktmbuf_detach(m);
1846 
1847  if (m->next != NULL) {
1848  m->next = NULL;
1849  m->nb_segs = 1;
1850  }
1851 
1852  return m;
1853 
1854  } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1855 
1856  if (!RTE_MBUF_DIRECT(m))
1857  rte_pktmbuf_detach(m);
1858 
1859  if (m->next != NULL) {
1860  m->next = NULL;
1861  m->nb_segs = 1;
1862  }
1863  rte_mbuf_refcnt_set(m, 1);
1864 
1865  return m;
1866  }
1867  return NULL;
1868 }
1869 
1879 static __rte_always_inline void
1880 rte_pktmbuf_free_seg(struct rte_mbuf *m)
1881 {
1882  m = rte_pktmbuf_prefree_seg(m);
1883  if (likely(m != NULL))
1884  rte_mbuf_raw_free(m);
1885 }
1886 
1896 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1897 {
1898  struct rte_mbuf *m_next;
1899 
1900  if (m != NULL)
1901  __rte_mbuf_sanity_check(m, 1);
1902 
1903  while (m != NULL) {
1904  m_next = m->next;
1905  rte_pktmbuf_free_seg(m);
1906  m = m_next;
1907  }
1908 }
1909 
1927 static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
1928  struct rte_mempool *mp)
1929 {
1930  struct rte_mbuf *mc, *mi, **prev;
1931  uint32_t pktlen;
1932  uint16_t nseg;
1933 
1934  if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
1935  return NULL;
1936 
1937  mi = mc;
1938  prev = &mi->next;
1939  pktlen = md->pkt_len;
1940  nseg = 0;
1941 
1942  do {
1943  nseg++;
1944  rte_pktmbuf_attach(mi, md);
1945  *prev = mi;
1946  prev = &mi->next;
1947  } while ((md = md->next) != NULL &&
1948  (mi = rte_pktmbuf_alloc(mp)) != NULL);
1949 
1950  *prev = NULL;
1951  mc->nb_segs = nseg;
1952  mc->pkt_len = pktlen;
1953 
1954  /* Allocation of new indirect segment failed */
1955  if (unlikely (mi == NULL)) {
1956  rte_pktmbuf_free(mc);
1957  return NULL;
1958  }
1959 
1960  __rte_mbuf_sanity_check(mc, 1);
1961  return mc;
1962 }
1963 
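/*
 * Usage sketch: duplicating a packet for a second destination without
 * copying payload; every clone segment shares the original's data buffer
 * and bumps its reference count. The example_* name is illustrative.
 */
static inline struct rte_mbuf *
example_dup(struct rte_mbuf *pkt, struct rte_mempool *clone_pool)
{
 struct rte_mbuf *copy = rte_pktmbuf_clone(pkt, clone_pool);

 /* on failure, pkt is untouched and still owned by the caller */
 return copy;
}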
1975 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1976 {
1977  __rte_mbuf_sanity_check(m, 1);
1978 
1979  do {
1980  rte_mbuf_refcnt_update(m, v);
1981  } while ((m = m->next) != NULL);
1982 }
1983 
1992 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
1993 {
1994  __rte_mbuf_sanity_check(m, 1);
1995  return m->data_off;
1996 }
1997 
2006 static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
2007 {
2008  __rte_mbuf_sanity_check(m, 1);
2009  return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
2010  m->data_len);
2011 }
2012 
2021 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
2022 {
2023  __rte_mbuf_sanity_check(m, 1);
2024  while (m->next != NULL)
2025  m = m->next;
2026  return m;
2027 }
2028 
2043 #define rte_pktmbuf_mtod_offset(m, t, o) \
2044  ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
2045 
2058 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
2059 
2069 #define rte_pktmbuf_iova_offset(m, o) \
2070  (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
2071 
2072 /* deprecated */
2073 #define rte_pktmbuf_mtophys_offset(m, o) \
2074  rte_pktmbuf_iova_offset(m, o)
2075 
2083 #define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
2084 
2085 /* deprecated */
2086 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
2087 
2096 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
2097 
2106 #define rte_pktmbuf_data_len(m) ((m)->data_len)
2107 
2123 static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
2124  uint16_t len)
2125 {
2126  __rte_mbuf_sanity_check(m, 1);
2127 
2128  if (unlikely(len > rte_pktmbuf_headroom(m)))
2129  return NULL;
2130 
2131  /* NB: elaborating the subtraction like this instead of using
2132  * -= allows us to ensure the result type is uint16_t
2133  * avoiding compiler warnings on gcc 8.1 at least */
2134  m->data_off = (uint16_t)(m->data_off - len);
2135  m->data_len = (uint16_t)(m->data_len + len);
2136  m->pkt_len = (m->pkt_len + len);
2137 
2138  return (char *)m->buf_addr + m->data_off;
2139 }
2140 
2156 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
2157 {
2158  void *tail;
2159  struct rte_mbuf *m_last;
2160 
2161 
2162  __rte_mbuf_sanity_check(m, 1);
2163  m_last = rte_pktmbuf_lastseg(m);
2164  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
2165  return NULL;
2166 
2167  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
2168  m_last->data_len = (uint16_t)(m_last->data_len + len);
2169  m->pkt_len = (m->pkt_len + len);
2170  return (char*) tail;
2171 }
2172 
2187 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
2188 {
2189  __rte_mbuf_sanity_check(m, 1);
2190 
2191  if (unlikely(len > m->data_len))
2192  return NULL;
2193 
2194  /* NB: elaborating the arithmetic like this instead of using
2195  * -= and += allows us to ensure the result type is uint16_t
2196  * avoiding compiler warnings on gcc 8.1 at least */
2197  m->data_len = (uint16_t)(m->data_len - len);
2198  m->data_off = (uint16_t)(m->data_off + len);
2199  m->pkt_len = (m->pkt_len - len);
2200  return (char *)m->buf_addr + m->data_off;
2201 }
2202 
2217 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
2218 {
2219  struct rte_mbuf *m_last;
2220 
2221 
2222  __rte_mbuf_sanity_check(m, 1);
2223  m_last = rte_pktmbuf_lastseg(m);
2224  if (unlikely(len > m_last->data_len))
2225  return -1;
2226 
2227  m_last->data_len = (uint16_t)(m_last->data_len - len);
2228  m->pkt_len = (m->pkt_len - len);
2229  return 0;
2230 }
2231 
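/*
 * Usage sketch: growing and shrinking a packet in place; the 14-byte
 * header, 64-byte payload and example_* name are illustrative.
 */
static inline int
example_resize(struct rte_mbuf *m)
{
 char *p;

 p = rte_pktmbuf_append(m, 64); /* extend the tail into tailroom */
 if (p == NULL)
  return -1; /* not enough tailroom */
 /* ... write 64 bytes of payload at p ... */
 p = rte_pktmbuf_prepend(m, 14); /* extend the head into headroom */
 if (p == NULL)
  return -1; /* not enough headroom */
 rte_pktmbuf_adj(m, 14); /* strip the header again */
 return rte_pktmbuf_trim(m, 64); /* drop the payload again */
}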
2241 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
2242 {
2243  __rte_mbuf_sanity_check(m, 1);
2244  return !!(m->nb_segs == 1);
2245 }
2246 
2250 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
2251  uint32_t len, void *buf);
2252 
2273 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
2274  uint32_t off, uint32_t len, void *buf)
2275 {
2276  if (likely(off + len <= rte_pktmbuf_data_len(m)))
2277  return rte_pktmbuf_mtod_offset(m, char *, off);
2278  else
2279  return __rte_pktmbuf_read(m, off, len, buf);
2280 }
2281 
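/*
 * Usage sketch: reading the first 14 bytes even when they may span
 * segments; the copy into hdr[] happens only in the non-contiguous case.
 * The example_* name is illustrative.
 */
static inline uint16_t
example_peek_ethertype(const struct rte_mbuf *m)
{
 uint8_t hdr[14];
 const uint8_t *p = rte_pktmbuf_read(m, 0, sizeof(hdr), hdr);

 if (p == NULL)
  return 0; /* packet shorter than 14 bytes */
 return (uint16_t)((p[12] << 8) | p[13]);
}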
2298 static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
2299 {
2300  struct rte_mbuf *cur_tail;
2301 
2302  /* Check for number-of-segments-overflow */
2303  if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
2304  return -EOVERFLOW;
2305 
2306  /* Chain 'tail' onto the old tail */
2307  cur_tail = rte_pktmbuf_lastseg(head);
2308  cur_tail->next = tail;
2309 
2310  /* accumulate number of segments and total length.
2311  * NB: elaborating the addition like this instead of using
2312  * += allows us to ensure the result type is uint16_t
2313  * avoiding compiler warnings on gcc 8.1 at least */
2314  head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
2315  head->pkt_len += tail->pkt_len;
2316 
2317  /* pkt_len is only set in the head */
2318  tail->pkt_len = tail->data_len;
2319 
2320  return 0;
2321 }
2322 
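/*
 * Usage sketch: joining two packets into one segment chain; the
 * example_* name is illustrative. On failure the caller still owns both
 * mbufs; this sketch chooses to free the tail.
 */
static inline struct rte_mbuf *
example_join(struct rte_mbuf *head, struct rte_mbuf *tail)
{
 if (rte_pktmbuf_chain(head, tail) != 0) {
  rte_pktmbuf_free(tail); /* would exceed RTE_MBUF_MAX_NB_SEGS */
  return NULL;
 }
 return head; /* nb_segs and pkt_len are updated on the head only */
}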
2323 /*
2324  * @warning
2325  * @b EXPERIMENTAL: This API may change without prior notice.
2326  *
2327  * For given input values generate raw tx_offload value.
2328  * Note that it is the caller's responsibility to make sure that the input
2329  * parameters don't exceed the maximum bit-field values.
2330  * @param il2
2331  * l2_len value.
2332  * @param il3
2333  * l3_len value.
2334  * @param il4
2335  * l4_len value.
2336  * @param tso
2337  * tso_segsz value.
2338  * @param ol3
2339  * outer_l3_len value.
2340  * @param ol2
2341  * outer_l2_len value.
2342  * @param unused
2343  * unused value.
2344  * @return
2345  * raw tx_offload value.
2346  */
2347 static __rte_always_inline uint64_t
2348 rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
2349  uint64_t ol3, uint64_t ol2, uint64_t unused)
2350 {
2351  return il2 << RTE_MBUF_L2_LEN_OFS |
2352  il3 << RTE_MBUF_L3_LEN_OFS |
2353  il4 << RTE_MBUF_L4_LEN_OFS |
2354  tso << RTE_MBUF_TSO_SEGSZ_OFS |
2355  ol3 << RTE_MBUF_OUTL3_LEN_OFS |
2356  ol2 << RTE_MBUF_OUTL2_LEN_OFS |
2357  unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
2358 }
2359 
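/*
 * Usage sketch: filling all tx_offload length fields with a single store
 * for a TSO TCP/IPv4 packet; the header sizes, MSS and example_* name
 * are illustrative.
 */
static inline void
example_set_tso(struct rte_mbuf *m)
{
 /* l2_len=14, l3_len=20, l4_len=20, tso_segsz=1448, no outer headers */
 m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 1448, 0, 0, 0);
 m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
}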
2370 static inline int
2371 rte_validate_tx_offload(const struct rte_mbuf *m)
2372 {
2373  uint64_t ol_flags = m->ol_flags;
2374 
2375  /* Does the packet request any of the available offloads? */
2376  if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
2377  return 0;
2378 
2379  /* The IP checksum can be computed only for an IPv4 packet */
2380  if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
2381  return -EINVAL;
2382 
2383  /* IP type not set when required */
2384  if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
2385  if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
2386  return -EINVAL;
2387 
2388  /* Check requirements for TSO packet */
2389  if (ol_flags & PKT_TX_TCP_SEG)
2390  if ((m->tso_segsz == 0) ||
2391  ((ol_flags & PKT_TX_IPV4) &&
2392  !(ol_flags & PKT_TX_IP_CKSUM)))
2393  return -EINVAL;
2394 
2395  /* PKT_TX_OUTER_IP_CKSUM set for a packet that is not outer IPv4. */
2396  if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2397  !(ol_flags & PKT_TX_OUTER_IPV4))
2398  return -EINVAL;
2399 
2400  return 0;
2401 }
2402 
2415 static inline int
2416 rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
2417 {
2418  size_t seg_len, copy_len;
2419  struct rte_mbuf *m;
2420  struct rte_mbuf *m_next;
2421  char *buffer;
2422 
2423  if (rte_pktmbuf_is_contiguous(mbuf))
2424  return 0;
2425 
2426  /* Extend first segment to the total packet length */
2427  copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
2428 
2429  if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
2430  return -1;
2431 
2432  buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
2433  mbuf->data_len = (uint16_t)(mbuf->pkt_len);
2434 
2435  /* Append data from next segments to the first one */
2436  m = mbuf->next;
2437  while (m != NULL) {
2438  m_next = m->next;
2439 
2440  seg_len = rte_pktmbuf_data_len(m);
2441  rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
2442  buffer += seg_len;
2443 
2444  rte_pktmbuf_free_seg(m);
2445  m = m_next;
2446  }
2447 
2448  mbuf->next = NULL;
2449  mbuf->nb_segs = 1;
2450 
2451  return 0;
2452 }
2453 
2468 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
2469 
2473 static inline uint32_t
2474 rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
2475 {
2476  return m->hash.sched.queue_id;
2477 }
2478 
2482 static inline uint8_t
2483 rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
2484 {
2485  return m->hash.sched.traffic_class;
2486 }
2487 
2491 static inline uint8_t
2492 rte_mbuf_sched_color_get(const struct rte_mbuf *m)
2493 {
2494  return m->hash.sched.color;
2495 }
2496 
2509 static inline void
2510 rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
2511  uint8_t *traffic_class,
2512  uint8_t *color)
2513 {
2514  struct rte_mbuf_sched sched = m->hash.sched;
2515 
2516  *queue_id = sched.queue_id;
2517  *traffic_class = sched.traffic_class;
2518  *color = sched.color;
2519 }
2520 
2524 static inline void
2525 rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
2526 {
2527  m->hash.sched.queue_id = queue_id;
2528 }
2529 
2533 static inline void
2534 rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
2535 {
2536  m->hash.sched.traffic_class = traffic_class;
2537 }
2538 
2542 static inline void
2543 rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
2544 {
2545  m->hash.sched.color = color;
2546 }
2547 
2560 static inline void
2561 rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
2562  uint8_t traffic_class,
2563  uint8_t color)
2564 {
2565  m->hash.sched = (struct rte_mbuf_sched){
2566  .queue_id = queue_id,
2567  .traffic_class = traffic_class,
2568  .color = color,
2569  .reserved = 0,
2570  };
2571 }
2572 
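/*
 * Usage sketch: writing and reading back the scheduler metadata; the
 * queue/class/color values and example_* name are illustrative.
 */
static inline int
example_sched_roundtrip(struct rte_mbuf *m)
{
 uint32_t queue_id;
 uint8_t traffic_class, color;

 rte_mbuf_sched_set(m, 3, 1, 0);
 rte_mbuf_sched_get(m, &queue_id, &traffic_class, &color);
 return queue_id == 3 && traffic_class == 1 && color == 0;
}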
2573 #ifdef __cplusplus
2574 }
2575 #endif
2576 
2577 #endif /* _RTE_MBUF_H_ */