DPDK 2.1.0
rte_mbuf.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

/**
 * @file
 * RTE Mbuf
 *
 * The mbuf library provides the ability to create and destroy buffers
 * that may be used by the RTE application to store message buffers.
 * The message buffers are stored in a mempool, using the RTE mempool
 * library.
 */

#include <stdint.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>

#ifdef __cplusplus
extern "C" {
#endif

/* deprecated options */
#pragma GCC poison RTE_MBUF_SCATTER_GATHER
#pragma GCC poison RTE_MBUF_REFCNT

/*
 * Packet Offload Features Flags. These also carry packet type information.
 * Critical resources: both RX and TX share these bits, so be cautious with
 * any change.
 *
 * - RX flags start at bit position zero, and new flags get added to the
 *   left of the previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags
 *   get added to the right of the previously defined flags, i.e. they
 *   should count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */
#define PKT_RX_VLAN_PKT      (1ULL << 0)  /**< RX packet is a 802.1q VLAN packet. */
#define PKT_RX_RSS_HASH      (1ULL << 1)  /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR          (1ULL << 2)  /**< RX packet with FDIR match indicate. */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)  /**< L4 cksum of RX pkt. is not OK. */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)  /**< IP cksum of RX pkt. is not OK. */
#define PKT_RX_EIP_CKSUM_BAD (0ULL << 0)  /**< External IP header checksum error. */
#define PKT_RX_OVERSIZE      (0ULL << 0)  /**< Num of desc of an RX pkt oversize. */
#define PKT_RX_HBUF_OVERFLOW (0ULL << 0)  /**< Header buffer overflow. */
#define PKT_RX_RECIP_ERR     (0ULL << 0)  /**< Hardware processing error. */
#define PKT_RX_MAC_ERR       (0ULL << 0)  /**< MAC error. */
#ifndef RTE_NEXT_ABI
#define PKT_RX_IPV4_HDR      (1ULL << 5)  /**< RX packet with IPv4 header. */
#define PKT_RX_IPV4_HDR_EXT  (1ULL << 6)  /**< RX packet with extended IPv4 header. */
#define PKT_RX_IPV6_HDR      (1ULL << 7)  /**< RX packet with IPv6 header. */
#define PKT_RX_IPV6_HDR_EXT  (1ULL << 8)  /**< RX packet with extended IPv6 header. */
#endif /* RTE_NEXT_ABI */
#define PKT_RX_IEEE1588_PTP  (1ULL << 9)  /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet. */
#ifndef RTE_NEXT_ABI
#define PKT_RX_TUNNEL_IPV4_HDR (1ULL << 11) /**< RX tunnel packet with IPv4 header. */
#define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
#endif /* RTE_NEXT_ABI */
#define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
#define PKT_RX_QINQ_PKT      (1ULL << 15) /**< RX packet with double VLAN stripped. */
/* add new RX flags here */

/* add new TX flags here */

#define PKT_TX_QINQ_PKT      (1ULL << 49) /**< TX packet with double VLAN inserted. */
#define PKT_TX_TCP_SEG       (1ULL << 50) /**< TX TSO (TCP segmentation offload). */

#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */

/*
 * Bits 52+53 select the L4 checksum to compute in hardware. The application
 * must also fill l2_len/l3_len in the mbuf, set PKT_TX_IPV4 or PKT_TX_IPV6,
 * and seed the L4 header with the pseudo-header checksum (for TCP/UDP).
 */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
#define PKT_TX_TCP_CKSUM     (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
#define PKT_TX_SCTP_CKSUM    (2ULL << 52) /**< SCTP cksum of TX pkt. computed by NIC. */
#define PKT_TX_UDP_CKSUM     (3ULL << 52) /**< UDP cksum of TX pkt. computed by NIC. */
#define PKT_TX_L4_MASK       (3ULL << 52) /**< Mask for L4 cksum offload request. */

#define PKT_TX_IP_CKSUM      (1ULL << 54) /**< Offload IP checksum in hardware. */

#define PKT_TX_IPV4          (1ULL << 55) /**< Packet is IPv4 (inner, if tunneled). */

#define PKT_TX_IPV6          (1ULL << 56) /**< Packet is IPv6 (inner, if tunneled). */

#define PKT_TX_VLAN_PKT      (1ULL << 57) /**< TX packet is a 802.1q VLAN packet. */

#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58) /**< Offload outer IP cksum in hardware. */

#define PKT_TX_OUTER_IPV4    (1ULL << 59) /**< Outer header is IPv4. */

#define PKT_TX_OUTER_IPV6    (1ULL << 60) /**< Outer header is IPv6. */

#define __RESERVED           (1ULL << 61) /**< reserved for future mbuf use */

#define IND_ATTACHED_MBUF    (1ULL << 62) /**< Indirect attached mbuf */

/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG       (1ULL << 63) /**< Mbuf contains control data */
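
/*
 * Illustrative sketch (not part of the original header): a TX path might
 * request hardware IPv4 and TCP checksum offload roughly as below. The
 * mbuf "m" and the header pointers "ip"/"tcp" are assumptions for the
 * example, and the PMD must advertise support for the requested offloads.
 *
 *        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *        m->l2_len = sizeof(struct ether_hdr);
 *        m->l3_len = sizeof(struct ipv4_hdr);
 *        ip->hdr_checksum = 0;
 *        tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
 */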
#ifdef RTE_NEXT_ABI
/*
 * 32 bits are divided into several fields to mark packet types. Note that
 * each field is an index into a set of enumerated values, not a bitmask.
 * - Bits 3:0 are for L2 types.
 * - Bits 7:4 are for L3 or outer L3 (for tunneling case) types.
 * - Bits 11:8 are for L4 or outer L4 (for tunneling case) types.
 * - Bits 15:12 are for tunnel types.
 * - Bits 19:16 are for inner L2 types.
 * - Bits 23:20 are for inner L3 types.
 * - Bits 27:24 are for inner L4 types.
 * - Bits 31:28 are reserved.
 *
 * To be compatible with Vector PMD, RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV4_EXT,
 * RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV6_EXT, RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP
 * and RTE_PTYPE_L4_SCTP should be kept contiguous in 7 bits, as below.
 *
 * Note that the L3 type values are chosen so that checking for IPv4/IPv6
 * headers is cheap. Read the annotations of RTE_ETH_IS_IPV4_HDR and
 * RTE_ETH_IS_IPV6_HDR before changing any L3 type value.
 *
 * Note that different hardware may recognize the same packet as different
 * packet types, as packet type recognition capabilities differ between
 * devices.
 *
 * Examples:
 * <'ether type'=0x0800
 * | 'version'=4, 'protocol'=0x29
 * | 'version'=6, 'next header'=0x3A
 * | 'ICMPv6 header'>
 * will be recognized on i40e hardware as the packet type combination
 * RTE_PTYPE_L2_ETHER |
 * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_TUNNEL_IP |
 * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_INNER_L4_ICMP.
 *
 * <'ether type'=0x86DD
 * | 'version'=6, 'next header'=0x2F
 * | 'GRE header'
 * | 'version'=6, 'next header'=0x11
 * | 'UDP header'>
 * will be recognized on i40e hardware as the packet type combination
 * RTE_PTYPE_L2_ETHER |
 * RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_TUNNEL_GRENAT |
 * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_INNER_L4_UDP.
 */
#define RTE_PTYPE_UNKNOWN                   0x00000000

#define RTE_PTYPE_L2_ETHER                  0x00000001
#define RTE_PTYPE_L2_ETHER_TIMESYNC         0x00000002
#define RTE_PTYPE_L2_ETHER_ARP              0x00000003
#define RTE_PTYPE_L2_ETHER_LLDP             0x00000004
#define RTE_PTYPE_L2_MASK                   0x0000000f

#define RTE_PTYPE_L3_IPV4                   0x00000010
#define RTE_PTYPE_L3_IPV4_EXT               0x00000030
#define RTE_PTYPE_L3_IPV6                   0x00000040
#define RTE_PTYPE_L3_IPV4_EXT_UNKNOWN       0x00000090
#define RTE_PTYPE_L3_IPV6_EXT               0x000000c0
#define RTE_PTYPE_L3_IPV6_EXT_UNKNOWN       0x000000e0
#define RTE_PTYPE_L3_MASK                   0x000000f0

#define RTE_PTYPE_L4_TCP                    0x00000100
#define RTE_PTYPE_L4_UDP                    0x00000200
#define RTE_PTYPE_L4_FRAG                   0x00000300
#define RTE_PTYPE_L4_SCTP                   0x00000400
#define RTE_PTYPE_L4_ICMP                   0x00000500
#define RTE_PTYPE_L4_NONFRAG                0x00000600
#define RTE_PTYPE_L4_MASK                   0x00000f00

#define RTE_PTYPE_TUNNEL_IP                 0x00001000
#define RTE_PTYPE_TUNNEL_GRE                0x00002000
#define RTE_PTYPE_TUNNEL_VXLAN              0x00003000
#define RTE_PTYPE_TUNNEL_NVGRE              0x00004000
#define RTE_PTYPE_TUNNEL_GENEVE             0x00005000
#define RTE_PTYPE_TUNNEL_GRENAT             0x00006000
#define RTE_PTYPE_TUNNEL_MASK               0x0000f000

#define RTE_PTYPE_INNER_L2_ETHER            0x00010000
#define RTE_PTYPE_INNER_L2_ETHER_VLAN       0x00020000
#define RTE_PTYPE_INNER_L2_MASK             0x000f0000

#define RTE_PTYPE_INNER_L3_IPV4             0x00100000
#define RTE_PTYPE_INNER_L3_IPV4_EXT         0x00200000
#define RTE_PTYPE_INNER_L3_IPV6             0x00300000
#define RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN 0x00400000
#define RTE_PTYPE_INNER_L3_IPV6_EXT         0x00500000
#define RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN 0x00600000
#define RTE_PTYPE_INNER_L3_MASK             0x00f00000

#define RTE_PTYPE_INNER_L4_TCP              0x01000000
#define RTE_PTYPE_INNER_L4_UDP              0x02000000
#define RTE_PTYPE_INNER_L4_FRAG             0x03000000
#define RTE_PTYPE_INNER_L4_SCTP             0x04000000
#define RTE_PTYPE_INNER_L4_ICMP             0x05000000
#define RTE_PTYPE_INNER_L4_NONFRAG          0x06000000
#define RTE_PTYPE_INNER_L4_MASK             0x0f000000

/* Check if it is an IPv4 packet */
#define RTE_ETH_IS_IPV4_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV4)

/* Check if it is an IPv6 packet */
#define RTE_ETH_IS_IPV6_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV6)

/* Check if it is a tunneling packet */
#define RTE_ETH_IS_TUNNEL_PKT(ptype) ((ptype) & (RTE_PTYPE_TUNNEL_MASK | \
                                                 RTE_PTYPE_INNER_L2_MASK | \
                                                 RTE_PTYPE_INNER_L3_MASK | \
                                                 RTE_PTYPE_INNER_L4_MASK))
#endif /* RTE_NEXT_ABI */
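
/*
 * Illustrative sketch (not part of the original header): with RTE_NEXT_ABI
 * enabled, an RX path can classify a received mbuf "m" (an assumption of
 * the example) from m->packet_type using the constants above.
 *
 *        uint32_t ptype = m->packet_type;
 *
 *        if (RTE_ETH_IS_TUNNEL_PKT(ptype)) {
 *                // inspect the RTE_PTYPE_INNER_* fields for the inner packet
 *        } else if (RTE_ETH_IS_IPV4_HDR(ptype) &&
 *                        (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
 *                // plain IPv4/TCP
 *        }
 */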

#define RTE_MBUF_PRIV_ALIGN 8

/* Get the name of an RX offload flag. */
const char *rte_get_rx_ol_flag_name(uint64_t mask);

/* Get the name of a TX offload flag. */
const char *rte_get_tx_ol_flag_name(uint64_t mask);

/* Some NICs need at least a 2KB buffer to RX standard Ethernet frames
 * without splitting them into multiple segments. */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
        (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* define a set of marker types that can be used to refer to set points in the
 * mbuf */
typedef void    *MARKER[0];   /**< generic marker for a point in a structure */
typedef uint8_t  MARKER8[0];  /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
                               * with a single assignment */

/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
        MARKER cacheline0;

        void *buf_addr;           /**< Virtual address of segment buffer. */
        phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */

        uint16_t buf_len;         /**< Length of segment buffer. */

        /* next 6 bytes are initialised on RX descriptor rearm */
        MARKER8 rearm_data;
        uint16_t data_off;

        /**
         * 16-bit Reference counter. It should only be accessed using the
         * functions rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read() and
         * rte_mbuf_refcnt_set(). Whether these are atomic is controlled
         * by the RTE_MBUF_REFCNT_ATOMIC flag.
         */
        union {
                rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
                uint16_t refcnt;              /**< Non-atomically accessed refcnt */
        };
        uint8_t nb_segs;          /**< Number of segments. */
        uint8_t port;             /**< Input port. */

        uint64_t ol_flags;        /**< Offload features. */

        /* remaining bytes are set on RX when pulling packet from descriptor */
        MARKER rx_descriptor_fields1;

#ifdef RTE_NEXT_ABI
        /*
         * The packet type, which is the combination of outer/inner L2, L3, L4
         * and tunnel types.
         */
        union {
                uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
                struct {
                        uint32_t l2_type:4;       /**< (Outer) L2 type. */
                        uint32_t l3_type:4;       /**< (Outer) L3 type. */
                        uint32_t l4_type:4;       /**< (Outer) L4 type. */
                        uint32_t tun_type:4;      /**< Tunnel type. */
                        uint32_t inner_l2_type:4; /**< Inner L2 type. */
                        uint32_t inner_l3_type:4; /**< Inner L3 type. */
                        uint32_t inner_l4_type:4; /**< Inner L4 type. */
                };
        };

        uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
        uint16_t data_len;        /**< Amount of data in segment buffer. */
        uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order). */
#else /* RTE_NEXT_ABI */

        /* Packet type: combination of outer/inner L2, L3, L4 types. */
        uint16_t packet_type;

        uint16_t data_len;        /**< Amount of data in segment buffer. */
        uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
        uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order). */
        uint16_t vlan_tci_outer;  /**< Outer VLAN Tag Control Id (CPU order). */
#endif /* RTE_NEXT_ABI */
        union {
                uint32_t rss;     /**< RSS hash result if RSS enabled */
                struct {
                        union {
                                struct {
                                        uint16_t hash;
                                        uint16_t id;
                                };
                                uint32_t lo;
                                /**< Second 4 flexible bytes */
                        };
                        uint32_t hi;
                        /**< First 4 flexible bytes or Rx queue number. */
                } fdir;           /**< Filter identifier if FDIR enabled */
                uint32_t sched;   /**< Hierarchical scheduler */
                uint32_t usr;     /**< User defined tags. See rte_distributor_process() */
        } hash;                   /**< hash information */

        uint32_t seqn;            /**< Sequence number. See also rte_reorder_insert() */
#ifdef RTE_NEXT_ABI
        uint16_t vlan_tci_outer;  /**< Outer VLAN Tag Control Id (CPU order). */
#endif /* RTE_NEXT_ABI */

        /* second cache line - fields only used in slow path or on TX */
        MARKER cacheline1 __rte_cache_aligned;

        union {
                void *userdata;   /**< Can be used for external metadata */
                uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
        };

        struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
        struct rte_mbuf *next;    /**< Next segment of scattered packet. */

        /* fields to support TX offloads */
        union {
                uint64_t tx_offload;       /**< combined for easy fetch */
                struct {
                        uint64_t l2_len:7;     /**< L2 (MAC) Header Length. */
                        uint64_t l3_len:9;     /**< L3 (IP) Header Length. */
                        uint64_t l4_len:8;     /**< L4 (TCP/UDP) Header Length. */
                        uint64_t tso_segsz:16; /**< TCP TSO segment size */

                        /* fields for TX offloading of tunnels */
                        uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
                        uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */

                        /* uint64_t unused:8; */
                };
        };

        /** Size of the application private data. In case of an indirect
         * mbuf, it stores the direct mbuf private data size. */
        uint16_t priv_size;

        /** Timesync flags for use with IEEE1588. */
        uint16_t timesync;
} __rte_cache_aligned;

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

/**
 * Return the mbuf owning the data buffer address of an indirect mbuf.
 */
static inline struct rte_mbuf *
rte_mbuf_from_indirect(const struct rte_mbuf *mi)
{
        return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

/**
 * Return the buffer address embedded in the given direct mbuf.
 */
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
        char *buffer_addr;
        buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
        return buffer_addr;
}

/* Returns TRUE if given mbuf is indirect, or FALSE otherwise. */
#define RTE_MBUF_INDIRECT(mb)   ((mb)->ol_flags & IND_ATTACHED_MBUF)

/* Returns TRUE if given mbuf is direct, or FALSE otherwise. */
#define RTE_MBUF_DIRECT(mb)     (!RTE_MBUF_INDIRECT(mb))

/* Private data in case of pktmbuf pool. */
struct rte_pktmbuf_pool_private {
        uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
        uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/* check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

/* check mbuf type in debug mode if mbuf pointer is not null */
#define __rte_mbuf_sanity_check_raw(m, is_h) do { \
        if ((m) != NULL) \
                rte_mbuf_sanity_check(m, is_h); \
} while (0)

/* MBUF asserts in debug mode */
#define RTE_MBUF_ASSERT(exp) \
if (!(exp)) { \
        rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
}

#else /* RTE_LIBRTE_MBUF_DEBUG */

/* check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

/* check mbuf type in debug mode if mbuf pointer is not null */
#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)

/* MBUF asserts in debug mode */
#define RTE_MBUF_ASSERT(exp) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

/* Reads the value of an mbuf's refcnt. */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
        return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

/* Sets an mbuf's refcnt to the defined value. */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
        rte_atomic16_set(&m->refcnt_atomic, new_value);
}

/* Adds given value to an mbuf's refcnt and returns its new value. */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        /*
         * The atomic_add is an expensive operation, so we don't want to
         * call it in the case where we know we are the unique holder of
         * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
         * operation has to be used because concurrent accesses on the
         * reference counter can occur.
         */
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                rte_mbuf_refcnt_set(m, 1 + value);
                return 1 + value;
        }

        return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* Adds given value to an mbuf's refcnt and returns its new value. */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        m->refcnt = (uint16_t)(m->refcnt + value);
        return m->refcnt;
}

/* Reads the value of an mbuf's refcnt. */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
        return m->refcnt;
}

/* Sets an mbuf's refcnt to the defined value. */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
        m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */
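
/*
 * Illustrative sketch (not part of the original header): to share one mbuf
 * between two consumers, take an extra reference before handing it out; the
 * buffer returns to its pool only when the last holder frees it with
 * rte_pktmbuf_free() (declared further below). The ring targets are
 * hypothetical.
 *
 *        rte_mbuf_refcnt_update(m, 1);          // refcnt: 1 -> 2
 *        rte_ring_enqueue(ring_a, m);
 *        rte_ring_enqueue(ring_b, m);
 */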

/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
        if ((m) != NULL)                        \
                rte_prefetch0(m);               \
} while (0)


/**
 * Sanity checks on an mbuf: panics if the mbuf is corrupted. Checks the
 * pool pointer and buffer offsets/lengths; if is_header is set, also
 * checks packet-level fields over all segments.
 */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

/**
 * Allocate an uninitialized mbuf from mempool *mp*; the refcnt is set to 1.
 */
static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;
        void *mb = NULL;

        if (rte_mempool_get(mp, &mb) < 0)
                return NULL;
        m = (struct rte_mbuf *)mb;
        RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
        rte_mbuf_refcnt_set(m, 1);
        return m;
}

/**
 * Put mbuf back into its original mempool. The refcnt must already be 0
 * (checked in debug mode).
 */
static inline void __attribute__((always_inline))
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
        RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
        rte_mempool_put(m->pool, m);
}

/* Operations on ctrl mbuf */

/**
 * The control mbuf constructor, given as a callback function to
 * rte_mempool_create(); it initializes each mbuf of the pool as a
 * control mbuf.
 */
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
                void *m, unsigned i);

/* Allocate a new mbuf (type is ctrl) from mempool *mp*. */
#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)

/* Free a control mbuf back into its original mempool. */
#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)

/* A macro that returns the pointer to the carried data. */
#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)

/* A macro that returns the length of the carried data. */
#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)

/* Tests if an mbuf is a control mbuf: 1 if yes, 0 if it is a packet mbuf. */
static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
        return !!(m->ol_flags & CTRL_MBUF_FLAG);
}

/* Operations on pkt mbuf */

/**
 * The packet mbuf constructor, given as a callback function to
 * rte_mempool_create(); it initializes the fields that never change once
 * the mbuf is created (origin pool, buffer address, and so on).
 */
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
                void *m, unsigned i);


/* A packet mbuf pool constructor: initializes the private pool data. */
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

/**
 * Create an mbuf pool: a wrapper around rte_mempool_create() with the
 * proper packet constructors. data_room_size includes
 * RTE_PKTMBUF_HEADROOM. Returns NULL with rte_errno set on error.
 */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
        unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
        int socket_id);

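/*
 * Illustrative sketch (not part of the original header): typical pool
 * creation during application start-up; the pool name, element count and
 * cache size are assumptions for the example.
 *
 *        struct rte_mempool *pool;
 *
 *        pool = rte_pktmbuf_pool_create("mbuf_pool", 8192, 256, 0,
 *                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *        if (pool == NULL)
 *                rte_panic("cannot create mbuf pool\n");
 */
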
/**
 * Get the data room size of mbufs stored in a pktmbuf pool: the buffer
 * length including RTE_PKTMBUF_HEADROOM.
 */
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
        struct rte_pktmbuf_pool_private *mbp_priv;

        mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
        return mbp_priv->mbuf_data_room_size;
}

/**
 * Get the application private size of mbufs stored in a pktmbuf pool:
 * the area located between the rte_mbuf structure and the data buffer.
 */
static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
        struct rte_pktmbuf_pool_private *mbp_priv;

        mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
        return mbp_priv->mbuf_priv_size;
}

/**
 * Reset the fields of a packet mbuf to their default values.
 * The given mbuf must have only one segment.
 */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
        m->next = NULL;
        m->pkt_len = 0;
        m->tx_offload = 0;
        m->vlan_tci = 0;
        m->vlan_tci_outer = 0;
        m->nb_segs = 1;
        m->port = 0xff;

        m->ol_flags = 0;
        m->packet_type = 0;
        m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
                        RTE_PKTMBUF_HEADROOM : m->buf_len;

        m->data_len = 0;
        __rte_mbuf_sanity_check(m, 1);
}

/**
 * Allocate a new mbuf from a mempool and reset its fields (refcnt set to
 * 1, one segment, length zero).
 */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;

        if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
                rte_pktmbuf_reset(m);
        return m;
}

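/*
 * Illustrative sketch (not part of the original header): allocate a packet
 * mbuf from a previously created pool, reserve room for payload with
 * rte_pktmbuf_append() (defined further below), then release the buffer.
 *
 *        struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
 *        if (m != NULL) {
 *                char *payload = rte_pktmbuf_append(m, 64);
 *                if (payload != NULL)
 *                        memset(payload, 0, 64);        // hypothetical payload
 *                rte_pktmbuf_free(m);
 *        }
 */
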
/**
 * Attach packet mbuf to another packet mbuf. After attachment, the mbuf
 * we attached (mi) is called 'indirect', while the mbuf we attached to
 * (m) is called 'direct'. Not supported: attaching an already indirect
 * mbuf, or attaching with an mi that is used by someone else (i.e. its
 * reference counter is greater than 1).
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
        struct rte_mbuf *md;

        RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
            rte_mbuf_refcnt_read(mi) == 1);

        /* if m is not direct, get the mbuf that embeds the data */
        if (RTE_MBUF_DIRECT(m))
                md = m;
        else
                md = rte_mbuf_from_indirect(m);

        rte_mbuf_refcnt_update(md, 1);
        mi->priv_size = m->priv_size;
        mi->buf_physaddr = m->buf_physaddr;
        mi->buf_addr = m->buf_addr;
        mi->buf_len = m->buf_len;

        mi->next = m->next;
        mi->data_off = m->data_off;
        mi->data_len = m->data_len;
        mi->port = m->port;
        mi->vlan_tci = m->vlan_tci;
        mi->vlan_tci_outer = m->vlan_tci_outer;
        mi->tx_offload = m->tx_offload;
        mi->hash = m->hash;

        mi->next = NULL;
        mi->pkt_len = mi->data_len;
        mi->nb_segs = 1;
        mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
        mi->packet_type = m->packet_type;

        __rte_mbuf_sanity_check(mi, 1);
        __rte_mbuf_sanity_check(m, 1);
}

/**
 * Detach an indirect packet mbuf:
 *  - restore original mbuf address and length values;
 *  - reset pktmbuf data and data_len to their default values.
 * All other fields of the given packet mbuf are left intact.
 */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len, priv_size;

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = sizeof(struct rte_mbuf) + priv_size;
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
        m->data_len = 0;
        m->ol_flags = 0;
}

static inline struct rte_mbuf* __attribute__((always_inline))
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 0);

        if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {

                /* if this is an indirect mbuf, then
                 *  - detach mbuf
                 *  - free attached mbuf segment
                 */
                if (RTE_MBUF_INDIRECT(m)) {
                        struct rte_mbuf *md = rte_mbuf_from_indirect(m);
                        rte_pktmbuf_detach(m);
                        if (rte_mbuf_refcnt_update(md, -1) == 0)
                                __rte_mbuf_raw_free(md);
                }
                return m;
        }
        return NULL;
}

/**
 * Free a segment of a packet mbuf into its original mempool, without
 * parsing other segments in case of chained buffers.
 */
static inline void __attribute__((always_inline))
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
        if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
                m->next = NULL;
                __rte_mbuf_raw_free(m);
        }
}

/**
 * Free a packet mbuf back into its original mempool, freeing all the
 * segments in case of chained buffers.
 */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
        struct rte_mbuf *m_next;

        __rte_mbuf_sanity_check(m, 1);

        while (m != NULL) {
                m_next = m->next;
                rte_pktmbuf_free_seg(m);
                m = m_next;
        }
}

/**
 * Create a "clone" of the given packet mbuf: allocate a new mbuf from
 * mempool *mp* and attach it (and one new mbuf per segment) to the data
 * of *md* without copying it. Returns the cloned chain, or NULL if
 * allocation fails.
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
                struct rte_mempool *mp)
{
        struct rte_mbuf *mc, *mi, **prev;
        uint32_t pktlen;
        uint8_t nseg;

        if (unlikely((mc = rte_pktmbuf_alloc(mp)) == NULL))
                return NULL;

        mi = mc;
        prev = &mi->next;
        pktlen = md->pkt_len;
        nseg = 0;

        do {
                nseg++;
                rte_pktmbuf_attach(mi, md);
                *prev = mi;
                prev = &mi->next;
        } while ((md = md->next) != NULL &&
                        (mi = rte_pktmbuf_alloc(mp)) != NULL);

        *prev = NULL;
        mc->nb_segs = nseg;
        mc->pkt_len = pktlen;

        /* Allocation of new indirect segment failed */
        if (unlikely(mi == NULL)) {
                rte_pktmbuf_free(mc);
                return NULL;
        }

        __rte_mbuf_sanity_check(mc, 1);
        return mc;
}

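/*
 * Illustrative sketch (not part of the original header): clone a packet so
 * it can be sent to a second port without copying the payload; both the
 * clone and the original still have to be freed (transmission does that on
 * completion). Port and queue ids are assumptions for the example.
 *
 *        struct rte_mbuf *c = rte_pktmbuf_clone(m, clone_pool);
 *        if (c != NULL)
 *                rte_eth_tx_burst(port_b, 0, &c, 1);
 *        rte_eth_tx_burst(port_a, 0, &m, 1);
 */
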
/**
 * Adds given value to the refcnt of all packet mbuf segments.
 */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
        __rte_mbuf_sanity_check(m, 1);

        do {
                rte_mbuf_refcnt_update(m, v);
        } while ((m = m->next) != NULL);
}

/**
 * Get the headroom in a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 1);
        return m->data_off;
}

/**
 * Get the tailroom of a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 1);
        return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
                          m->data_len);
}

/**
 * Get the last segment of the packet mbuf.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
        struct rte_mbuf *m2 = (struct rte_mbuf *)m;

        __rte_mbuf_sanity_check(m, 1);
        while (m2->next != NULL)
                m2 = m2->next;
        return m2;
}

/**
 * A macro that points to an offset into the data in the mbuf. The
 * returned pointer is cast to type t; the offset (o) is in bytes. The
 * user must ensure the first segment is large enough for the access.
 */
#define rte_pktmbuf_mtod_offset(m, t, o) \
        ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/* A macro that points to the start of the data in the mbuf, cast to type t. */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/* A macro that returns the total length of the packet (sum of all segments). */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/* A macro that returns the length of the data in the segment buffer. */
#define rte_pktmbuf_data_len(m) ((m)->data_len)

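/*
 * Illustrative sketch (not part of the original header): read the Ethernet
 * and IPv4 headers of a contiguous packet; struct ether_hdr and struct
 * ipv4_hdr come from rte_ether.h and rte_ip.h.
 *
 *        struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
 *        struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
 *                        sizeof(struct ether_hdr));
 */
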
/**
 * Prepend len bytes to an mbuf data area, returning a pointer to the new
 * data start address, or NULL if there is not enough headroom.
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
                                        uint16_t len)
{
        __rte_mbuf_sanity_check(m, 1);

        if (unlikely(len > rte_pktmbuf_headroom(m)))
                return NULL;

        m->data_off -= len;
        m->data_len = (uint16_t)(m->data_len + len);
        m->pkt_len = (m->pkt_len + len);

        return (char *)m->buf_addr + m->data_off;
}

/**
 * Append len bytes to an mbuf, returning a pointer to the start address
 * of the added data, or NULL if there is not enough tailroom in the last
 * segment.
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
        void *tail;
        struct rte_mbuf *m_last;

        __rte_mbuf_sanity_check(m, 1);

        m_last = rte_pktmbuf_lastseg(m);
        if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
                return NULL;

        tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
        m_last->data_len = (uint16_t)(m_last->data_len + len);
        m->pkt_len = (m->pkt_len + len);
        return (char *)tail;
}

/**
 * Remove len bytes at the beginning of an mbuf, returning a pointer to
 * the new data start address, or NULL if len is greater than the length
 * of the first segment.
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
        __rte_mbuf_sanity_check(m, 1);

        if (unlikely(len > m->data_len))
                return NULL;

        m->data_len = (uint16_t)(m->data_len - len);
        m->data_off += len;
        m->pkt_len = (m->pkt_len - len);
        return (char *)m->buf_addr + m->data_off;
}

/**
 * Remove len bytes of data at the end of the mbuf. Returns 0 on success,
 * -1 if len is greater than the length of the last segment.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
        struct rte_mbuf *m_last;

        __rte_mbuf_sanity_check(m, 1);

        m_last = rte_pktmbuf_lastseg(m);
        if (unlikely(len > m_last->data_len))
                return -1;

        m_last->data_len = (uint16_t)(m_last->data_len - len);
        m->pkt_len = (m->pkt_len - len);
        return 0;
}

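/*
 * Illustrative sketch (not part of the original header): the four helpers
 * above cover encapsulation and decapsulation at both ends of the packet.
 * For instance, pushing and later stripping a 4-byte tag (the tag writer
 * is hypothetical):
 *
 *        char *tag = rte_pktmbuf_prepend(m, 4);
 *        if (tag != NULL)
 *                write_my_tag(tag);             // hypothetical
 *        ...
 *        rte_pktmbuf_adj(m, 4);                 // strip it again
 */
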
/**
 * Test if mbuf data is contiguous (i.e. with only one segment).
 * Returns 1 if contiguous, 0 otherwise.
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 1);
        return !!(m->nb_segs == 1);
}

/**
 * Dump an mbuf structure to a file, including internal fields and at
 * most dump_len bytes of packet data.
 */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */