DPDK 2.2.0
rte_mbuf.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>

#ifdef __cplusplus
extern "C" {
#endif

/* deprecated options */
#pragma GCC poison RTE_MBUF_SCATTER_GATHER
#pragma GCC poison RTE_MBUF_REFCNT

/*
 * Packet Offload Features Flags. They also carry packet type information.
 * These bits are critical resources shared by both RX and TX, so be
 * cautious with any change.
 *
 * - RX flags start at bit position zero, and get added to the left of
 *   previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags
 *   get added to the right of the previously defined flags, i.e. they should
 *   count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */
#define PKT_RX_VLAN_PKT      (1ULL << 0)  /**< RX packet is a 802.1q VLAN packet. */
#define PKT_RX_RSS_HASH      (1ULL << 1)  /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR          (1ULL << 2)  /**< RX packet with FDIR match indicate. */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)  /**< L4 cksum of RX pkt. is not OK. */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)  /**< IP cksum of RX pkt. is not OK. */
#define PKT_RX_EIP_CKSUM_BAD (0ULL << 0)  /**< External IP header checksum error. */
#define PKT_RX_OVERSIZE      (0ULL << 0)  /**< Num of desc of an RX pkt oversize. */
#define PKT_RX_HBUF_OVERFLOW (0ULL << 0)  /**< Header buffer overflow. */
#define PKT_RX_RECIP_ERR     (0ULL << 0)  /**< Hardware processing error. */
#define PKT_RX_MAC_ERR       (0ULL << 0)  /**< MAC error. */
#define PKT_RX_IEEE1588_PTP  (1ULL << 9)  /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet. */
#define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
#define PKT_RX_QINQ_PKT      (1ULL << 15) /**< RX packet with double VLAN stripped. */
/* add new RX flags here */
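
/*
 * Illustrative usage sketch (not part of the original header): testing RX
 * offload flags on received packets. rte_eth_rx_burst() comes from
 * rte_ethdev.h; "port_id", "queue_id" and the burst size are example values.
 *
 *   struct rte_mbuf *pkts[32];
 *   uint16_t i, nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *
 *   for (i = 0; i < nb; i++) {
 *       if (pkts[i]->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
 *           rte_pktmbuf_free(pkts[i]); // drop packets with bad checksums
 *   }
 */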

/* add new TX flags here */

/** TX packet with double VLAN inserted. */
#define PKT_TX_QINQ_PKT      (1ULL << 49)

/**
 * TCP segmentation offload (TSO): the PMD segments the packet according to
 * tso_segsz. PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM, and PKT_TX_IPV4 or
 * PKT_TX_IPV6 must be set as well.
 */
#define PKT_TX_TCP_SEG       (1ULL << 50)

/** TX IEEE1588 packet to timestamp. */
#define PKT_TX_IEEE1588_TMST (1ULL << 51)

/*
 * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
 * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum.
 */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
#define PKT_TX_TCP_CKSUM     (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
#define PKT_TX_SCTP_CKSUM    (2ULL << 52) /**< SCTP cksum of TX pkt. computed by NIC. */
#define PKT_TX_UDP_CKSUM     (3ULL << 52) /**< UDP cksum of TX pkt. computed by NIC. */
#define PKT_TX_L4_MASK       (3ULL << 52) /**< Mask for L4 cksum offload request. */

/** Offload the IP checksum in the hardware. PKT_TX_IPV4 should also be set. */
#define PKT_TX_IP_CKSUM      (1ULL << 54)

/** Packet is IPv4. To be set when using any IPv4-related HW offload. */
#define PKT_TX_IPV4          (1ULL << 55)

/** Packet is IPv6. To be set when using any IPv6-related HW offload. */
#define PKT_TX_IPV6          (1ULL << 56)

/** TX packet is a 802.1q VLAN packet. */
#define PKT_TX_VLAN_PKT      (1ULL << 57)

/** Offload the outer IP checksum of a tunnel packet in the hardware. */
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)

/** Outer header is IPv4 (for tunnel packets with outer-IP offloads). */
#define PKT_TX_OUTER_IPV4    (1ULL << 59)

/** Outer header is IPv6 (for tunnel packets with outer-IP offloads). */
#define PKT_TX_OUTER_IPV6    (1ULL << 60)

#define __RESERVED           (1ULL << 61) /**< reserved for future mbuf use */

#define IND_ATTACHED_MBUF    (1ULL << 62) /**< Indirect attached mbuf */

/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG       (1ULL << 63) /**< Mbuf contains control data */
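
/*
 * Illustrative sketch: requesting IPv4 and TCP checksum offload on TX. The
 * l2_len/l3_len fields tell the PMD where the headers start; for
 * PKT_TX_TCP_CKSUM the application is also expected to seed the TCP checksum
 * field with the pseudo-header checksum (see rte_ipv4_phdr_cksum() in
 * rte_ip.h). Header struct names come from rte_ether.h and rte_ip.h.
 *
 *   m->l2_len = sizeof(struct ether_hdr);
 *   m->l3_len = sizeof(struct ipv4_hdr);
 *   m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 */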

/*
 * 32 bits are divided into several fields to mark packet types. Note that
 * each field is an index rather than a bitmask: the values within a layer's
 * field are enumerated.
 * - Bits 3:0 are for L2 types.
 * - Bits 7:4 are for L3 or outer L3 (for tunneling case) types.
 * - Bits 11:8 are for L4 or outer L4 (for tunneling case) types.
 * - Bits 15:12 are for tunnel types.
 * - Bits 19:16 are for inner L2 types.
 * - Bits 23:20 are for inner L3 types.
 * - Bits 27:24 are for inner L4 types.
 * - Bits 31:28 are reserved.
 *
 * To be compatible with Vector PMD, RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV4_EXT,
 * RTE_PTYPE_L3_IPV6, RTE_PTYPE_L3_IPV6_EXT, RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP
 * and RTE_PTYPE_L4_SCTP should be kept as below, within a contiguous 7 bits.
 *
 * Note that the L3 type values are chosen so that IPv4/IPv6 headers can be
 * checked quickly. Read the annotations of RTE_ETH_IS_IPV4_HDR and
 * RTE_ETH_IS_IPV6_HDR before changing any L3 type value.
 *
 * Note that different hardware may report different packet types for the
 * same packet, as packet type recognition capabilities differ between
 * devices.
 *
 * Examples:
 * <'ether type'=0x0800
 * | 'version'=4, 'protocol'=0x29
 * | 'version'=6, 'next header'=0x3A
 * | 'ICMPv6 header'>
 * will be recognized on i40e hardware as the packet type combination
 * RTE_PTYPE_L2_ETHER |
 * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_TUNNEL_IP |
 * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_INNER_L4_ICMP.
 *
 * <'ether type'=0x86DD
 * | 'version'=6, 'next header'=0x2F
 * | 'GRE header'
 * | 'version'=6, 'next header'=0x11
 * | 'UDP header'>
 * will be recognized on i40e hardware as the packet type combination
 * RTE_PTYPE_L2_ETHER |
 * RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_TUNNEL_GRENAT |
 * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
 * RTE_PTYPE_INNER_L4_UDP.
 */
#define RTE_PTYPE_UNKNOWN                   0x00000000 /**< Undefined packet type. */

#define RTE_PTYPE_L2_ETHER                  0x00000001 /**< Ethernet packet type. */
#define RTE_PTYPE_L2_ETHER_TIMESYNC         0x00000002 /**< Ethernet packet type for time sync. */
#define RTE_PTYPE_L2_ETHER_ARP              0x00000003 /**< ARP (Address Resolution Protocol) packet type. */
#define RTE_PTYPE_L2_ETHER_LLDP             0x00000004 /**< LLDP (Link Layer Discovery Protocol) packet type. */
#define RTE_PTYPE_L2_MASK                   0x0000000f /**< Mask of layer 2 packet types. */

#define RTE_PTYPE_L3_IPV4                   0x00000010 /**< IPv4, no extension headers. */
#define RTE_PTYPE_L3_IPV4_EXT               0x00000030 /**< IPv4 with extension (option) headers. */
#define RTE_PTYPE_L3_IPV6                   0x00000040 /**< IPv6, no extension headers. */
#define RTE_PTYPE_L3_IPV4_EXT_UNKNOWN       0x00000090 /**< IPv4, extension headers unknown. */
#define RTE_PTYPE_L3_IPV6_EXT               0x000000c0 /**< IPv6 with extension headers. */
#define RTE_PTYPE_L3_IPV6_EXT_UNKNOWN       0x000000e0 /**< IPv6, extension headers unknown. */
#define RTE_PTYPE_L3_MASK                   0x000000f0 /**< Mask of layer 3 packet types. */

#define RTE_PTYPE_L4_TCP                    0x00000100 /**< TCP packet type. */
#define RTE_PTYPE_L4_UDP                    0x00000200 /**< UDP packet type. */
#define RTE_PTYPE_L4_FRAG                   0x00000300 /**< Fragmented IP packet type. */
#define RTE_PTYPE_L4_SCTP                   0x00000400 /**< SCTP packet type. */
#define RTE_PTYPE_L4_ICMP                   0x00000500 /**< ICMP packet type. */
#define RTE_PTYPE_L4_NONFRAG                0x00000600 /**< Non-fragmented, non-TCP/UDP/SCTP/ICMP IP packet type. */
#define RTE_PTYPE_L4_MASK                   0x00000f00 /**< Mask of layer 4 packet types. */

#define RTE_PTYPE_TUNNEL_IP                 0x00001000 /**< IP-in-IP tunnelling packet type. */
#define RTE_PTYPE_TUNNEL_GRE                0x00002000 /**< GRE tunnelling packet type. */
#define RTE_PTYPE_TUNNEL_VXLAN              0x00003000 /**< VXLAN tunnelling packet type. */
#define RTE_PTYPE_TUNNEL_NVGRE              0x00004000 /**< NVGRE tunnelling packet type. */
#define RTE_PTYPE_TUNNEL_GENEVE             0x00005000 /**< GENEVE tunnelling packet type. */
#define RTE_PTYPE_TUNNEL_GRENAT             0x00006000 /**< Tunnelling packet type of Teredo, GRE or VXLAN, which cannot be further distinguished. */
#define RTE_PTYPE_TUNNEL_MASK               0x0000f000 /**< Mask of tunnelling packet types. */

#define RTE_PTYPE_INNER_L2_ETHER            0x00010000 /**< Inner Ethernet packet type. */
#define RTE_PTYPE_INNER_L2_ETHER_VLAN       0x00020000 /**< Inner Ethernet packet type with VLAN. */
#define RTE_PTYPE_INNER_L2_MASK             0x000f0000 /**< Mask of inner layer 2 packet types. */

#define RTE_PTYPE_INNER_L3_IPV4             0x00100000 /**< Inner IPv4, no extension headers. */
#define RTE_PTYPE_INNER_L3_IPV4_EXT         0x00200000 /**< Inner IPv4 with extension headers. */
#define RTE_PTYPE_INNER_L3_IPV6             0x00300000 /**< Inner IPv6, no extension headers. */
#define RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN 0x00400000 /**< Inner IPv4, extension headers unknown. */
#define RTE_PTYPE_INNER_L3_IPV6_EXT         0x00500000 /**< Inner IPv6 with extension headers. */
#define RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN 0x00600000 /**< Inner IPv6, extension headers unknown. */
#define RTE_PTYPE_INNER_L3_MASK             0x00f00000 /**< Mask of inner layer 3 packet types. */

#define RTE_PTYPE_INNER_L4_TCP              0x01000000 /**< Inner TCP packet type. */
#define RTE_PTYPE_INNER_L4_UDP              0x02000000 /**< Inner UDP packet type. */
#define RTE_PTYPE_INNER_L4_FRAG             0x03000000 /**< Inner fragmented IP packet type. */
#define RTE_PTYPE_INNER_L4_SCTP             0x04000000 /**< Inner SCTP packet type. */
#define RTE_PTYPE_INNER_L4_ICMP             0x05000000 /**< Inner ICMP packet type. */
#define RTE_PTYPE_INNER_L4_NONFRAG          0x06000000 /**< Inner non-fragmented, non-TCP/UDP/SCTP/ICMP IP packet type. */
#define RTE_PTYPE_INNER_L4_MASK             0x0f000000 /**< Mask of inner layer 4 packet types. */

/**
 * Check if the (outer) L3 header is IPv4. The L3 type values are defined so
 * that bit 4 is set for every IPv4 type, making this a single AND.
 */
#define RTE_ETH_IS_IPV4_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV4)

/**
 * Check if the (outer) L3 header is IPv6. Bit 6 is set for every IPv6 type.
 */
#define RTE_ETH_IS_IPV6_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV6)

/* Check if it is a tunneling packet */
#define RTE_ETH_IS_TUNNEL_PKT(ptype) ((ptype) & (RTE_PTYPE_TUNNEL_MASK | \
						 RTE_PTYPE_INNER_L2_MASK | \
						 RTE_PTYPE_INNER_L3_MASK | \
						 RTE_PTYPE_INNER_L4_MASK))
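
/*
 * Illustrative sketch: because each ptype field is an index, L4 types are
 * compared against the masked value rather than tested bit by bit.
 *
 *   uint32_t ptype = m->packet_type;
 *
 *   if (RTE_ETH_IS_IPV4_HDR(ptype)) {
 *       switch (ptype & RTE_PTYPE_L4_MASK) {
 *       case RTE_PTYPE_L4_TCP: handle_tcp(m); break; // handle_* are
 *       case RTE_PTYPE_L4_UDP: handle_udp(m); break; // hypothetical helpers
 *       default: break;
 *       }
 *   }
 */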
688 
690 #define RTE_MBUF_PRIV_ALIGN 8
691 
700 const char *rte_get_rx_ol_flag_name(uint64_t mask);
701 
712 const char *rte_get_tx_ol_flag_name(uint64_t mask);
713 
720 #define RTE_MBUF_DEFAULT_DATAROOM 2048
721 #define RTE_MBUF_DEFAULT_BUF_SIZE \
722  (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
723 
724 /* define a set of marker types that can be used to refer to set points in the
725  * mbuf */
726 typedef void *MARKER[0];
727 typedef uint8_t MARKER8[0];
728 typedef uint64_t MARKER64[0];
struct rte_mbuf_offload;

/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
	MARKER cacheline0;

	void *buf_addr;           /**< Virtual address of segment buffer. */
	phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */

	uint16_t buf_len;         /**< Length of segment buffer. */

	/* next 6 bytes are initialised on RX descriptor rearm */
	MARKER8 rearm_data;
	uint16_t data_off;

	/**
	 * 16-bit reference counter. It should only be accessed via
	 * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read() and
	 * rte_mbuf_refcnt_set(); whether those are atomic is controlled
	 * by RTE_MBUF_REFCNT_ATOMIC.
	 */
	union {
		rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
		uint16_t refcnt;              /**< Non-atomically accessed refcnt */
	};
	uint8_t nb_segs;          /**< Number of segments. */
	uint8_t port;             /**< Input port. */

	uint64_t ol_flags;        /**< Offload features. */

	/* remaining bytes are set on RX when pulling packet from descriptor */
	MARKER rx_descriptor_fields1;

	/*
	 * The packet type, which is the combination of outer/inner L2, L3, L4
	 * and tunnel types.
	 */
	union {
		uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
		struct {
			uint32_t l2_type:4;       /**< (Outer) L2 type. */
			uint32_t l3_type:4;       /**< (Outer) L3 type. */
			uint32_t l4_type:4;       /**< (Outer) L4 type. */
			uint32_t tun_type:4;      /**< Tunnel type. */
			uint32_t inner_l2_type:4; /**< Inner L2 type. */
			uint32_t inner_l3_type:4; /**< Inner L3 type. */
			uint32_t inner_l4_type:4; /**< Inner L4 type. */
		};
	};

	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
	uint16_t data_len;        /**< Amount of data in segment buffer. */
	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order). */

	union {
		uint32_t rss;     /**< RSS hash result if RSS enabled */
		struct {
			union {
				struct {
					uint16_t hash;
					uint16_t id;
				};
				uint32_t lo; /**< Second 4 flexible bytes */
			};
			uint32_t hi;         /**< First 4 flexible bytes or Rx queue ID */
		} fdir;           /**< Filter identifier if FDIR enabled */
		struct {
			uint32_t lo;
			uint32_t hi;
		} sched;          /**< Hierarchical scheduler */
		uint32_t usr;     /**< User defined tags */
	} hash;                   /**< hash information */

	uint32_t seqn;            /**< Sequence number */

	uint16_t vlan_tci_outer;  /**< Outer VLAN Tag Control Identifier (CPU order) */

	/* second cache line - fields only used in slow path or on TX */
	MARKER cacheline1 __rte_cache_aligned;

	union {
		void *userdata;   /**< Can be used for external metadata */
		uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
	};

	struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
	struct rte_mbuf *next;    /**< Next segment of scattered packet. */

	/* fields to support TX offloads */
	union {
		uint64_t tx_offload;       /**< combined for easy fetch */
		struct {
			uint64_t l2_len:7;     /**< L2 (MAC) header length. */
			uint64_t l3_len:9;     /**< L3 (IP) header length. */
			uint64_t l4_len:8;     /**< L4 (TCP/UDP) header length. */
			uint64_t tso_segsz:16; /**< TCP TSO segment size */

			/* fields for TX offloading of tunnels */
			uint64_t outer_l3_len:9; /**< Outer L3 (IP) header length. */
			uint64_t outer_l2_len:7; /**< Outer L2 (MAC) header length. */

			/* uint64_t unused:8; */
		};
	};

	/** Size of the application private data. In case of an indirect
	 * mbuf, it stores the direct mbuf private data size. */
	uint16_t priv_size;

	/** Timesync flags for use with IEEE1588. */
	uint16_t timesync;

	/* Chain of off-load operations to perform on mbuf */
	struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

/**
 * Return the mbuf owning the data buffer address of an indirect mbuf.
 */
static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

/**
 * Return the buffer address embedded in the given direct mbuf.
 */
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
	char *buffer_addr;
	buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
	return buffer_addr;
}
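
/*
 * For a direct mbuf, the layout of a mempool element is (a sketch):
 *
 *   +-----------------+-----------------+-----------------------------+
 *   | struct rte_mbuf | priv_size bytes | data buffer (buf_len bytes, |
 *   |                 | of private data | headroom followed by data)  |
 *   +-----------------+-----------------+-----------------------------+
 *                                       ^ buf_addr
 *
 * rte_mbuf_to_baddr() walks forward from the mbuf to the buffer, and
 * rte_mbuf_from_indirect() walks backward from buf_addr to the mbuf.
 */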

/** Returns TRUE if given mbuf is indirect, or FALSE otherwise. */
#define RTE_MBUF_INDIRECT(mb)   ((mb)->ol_flags & IND_ATTACHED_MBUF)

/** Returns TRUE if given mbuf is direct, or FALSE otherwise. */
#define RTE_MBUF_DIRECT(mb)     (!RTE_MBUF_INDIRECT(mb))

/** Private data in case of pktmbuf pool. */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

/** check mbuf type in debug mode if mbuf pointer is not null */
#define __rte_mbuf_sanity_check_raw(m, is_h) do { \
	if ((m) != NULL) \
		rte_mbuf_sanity_check(m, is_h); \
} while (0)

/** MBUF asserts in debug mode */
#define RTE_MBUF_ASSERT(exp) \
if (!(exp)) { \
	rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
}

#else /* RTE_LIBRTE_MBUF_DEBUG */

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

/** check mbuf type in debug mode if mbuf pointer is not null */
#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)

/** MBUF asserts in debug mode */
#define RTE_MBUF_ASSERT(exp) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

/**
 * Read the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

/**
 * Set an mbuf's refcnt to a defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, new_value);
}

/**
 * Add the given value to an mbuf's refcnt and return its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		rte_mbuf_refcnt_set(m, 1 + value);
		return 1 + value;
	}

	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/**
 * Add the given value to an mbuf's refcnt and return its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

/**
 * Read the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

/**
 * Set an mbuf's refcnt to the defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */
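
/*
 * Illustrative sketch of the reference-counting contract: a freshly
 * allocated mbuf has refcnt == 1, each attach adds one, and the buffer
 * only returns to its pool when the count reaches zero.
 *
 *   RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 1);
 *   rte_mbuf_refcnt_update(m, 1);  // e.g. one clone now shares the data
 *   rte_mbuf_refcnt_update(m, -1); // back to 1, still owned by us
 */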

/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
	if ((m) != NULL) \
		rte_prefetch0(m); \
} while (0)


/**
 * Sanity checks on an mbuf.
 *
 * Check the consistency of the given mbuf. The function causes a panic if
 * corruption is detected. is_header is true if the mbuf is a packet header,
 * false if it is a sub-segment of a packet (in which case some fields like
 * nb_segs are not checked).
 */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

/**
 * @internal Allocate a new mbuf from mempool *mp*. Reserved for RTE
 * internal needs; please use rte_pktmbuf_alloc(). Returns NULL if
 * allocation failed.
 */
static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_mempool_get(mp, &mb) < 0)
		return NULL;
	m = (struct rte_mbuf *)mb;
	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
	rte_mbuf_refcnt_set(m, 1);
	return m;
}

/**
 * @internal Put mbuf back into its original mempool. Reserved for RTE
 * internal needs; please use rte_pktmbuf_free().
 */
static inline void __attribute__((always_inline))
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
	rte_mempool_put(m->pool, m);
}

/* Operations on ctrl mbuf */

/**
 * The control mbuf constructor, given as a callback function to
 * rte_mempool_create() at pool creation time.
 */
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);

/** Allocate a new mbuf (type is ctrl) from mempool *mp*. */
#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)

/** Free a control mbuf back into its original mempool. */
#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)

/** A macro that returns the pointer to the carried data. */
#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)

/** A macro that returns the length of the carried data. */
#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)

/**
 * Tests if an mbuf is a control mbuf.
 */
static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
	return !!(m->ol_flags & CTRL_MBUF_FLAG);
}

/* Operations on pkt mbuf */

/**
 * The packet mbuf constructor, given as a callback function to
 * rte_mempool_create() at pool creation time.
 */
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);


/**
 * A packet mbuf pool constructor; initializes the pool private data
 * (a struct rte_pktmbuf_pool_private).
 */
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

/**
 * Create an mbuf pool. This is a wrapper around rte_mempool_create() that
 * uses rte_pktmbuf_pool_init() and rte_pktmbuf_init() as constructors.
 */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

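/*
 * Illustrative sketch: creating a pool of 8192 mbufs with the default
 * buffer size and a per-lcore cache of 256 ("mbuf_pool" and the sizes are
 * example values).
 *
 *   struct rte_mempool *pool = rte_pktmbuf_pool_create("mbuf_pool", 8192,
 *       256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   if (pool == NULL)
 *       rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 */
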
/**
 * Get the data room size of mbufs stored in a pktmbuf_pool: the amount of
 * data that can be stored in an mbuf, including RTE_PKTMBUF_HEADROOM.
 */
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

/**
 * Get the application private size of mbufs stored in a pktmbuf_pool: the
 * size of the area reserved between the rte_mbuf structure and the data
 * buffer.
 */
static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

/**
 * Reset the fields of a packet mbuf to their default values.
 */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = 0xff;

	m->ol_flags = 0;
	m->packet_type = 0;
	m->data_off = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
			RTE_PKTMBUF_HEADROOM : m->buf_len;

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

/**
 * Allocate a new mbuf from a mempool, with pkt_len of 0, nb_segs of 1, and
 * a data offset of RTE_PKTMBUF_HEADROOM. Returns NULL on allocation failure.
 */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}
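
/*
 * Illustrative sketch: allocate an mbuf and copy a frame into it ("pool",
 * "frame" and "frame_len" are assumed to exist in the caller).
 *
 *   struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
 *   if (m != NULL) {
 *       char *p = rte_pktmbuf_append(m, frame_len);
 *       if (p != NULL)
 *           rte_memcpy(p, frame, frame_len); // data_len == pkt_len now
 *       else
 *           rte_pktmbuf_free(m); // not enough tailroom
 *   }
 */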

/**
 * Attach packet mbuf to another packet mbuf.
 *
 * After attachment, the mbuf we attached (mi) is called 'indirect', and the
 * mbuf we attached to is called 'direct'. The direct mbuf's reference
 * counter is incremented.
 *
 * Currently not supported:
 *  - attaching an already-indirect mbuf (mi has to be direct);
 *  - attaching an mbuf used by someone else (mi's reference counter must
 *    be 1).
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	/* if m is not direct, get the mbuf that embeds the data */
	if (RTE_MBUF_DIRECT(m))
		md = m;
	else
		md = rte_mbuf_from_indirect(m);

	rte_mbuf_refcnt_update(md, 1);
	mi->priv_size = m->priv_size;
	mi->buf_physaddr = m->buf_physaddr;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->next = m->next;
	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->port = m->port;
	mi->vlan_tci = m->vlan_tci;
	mi->vlan_tci_outer = m->vlan_tci_outer;
	mi->tx_offload = m->tx_offload;
	mi->hash = m->hash;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;
	mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	mi->packet_type = m->packet_type;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 0);
}

/**
 * Detach an indirect packet mbuf:
 *  - restore the original mbuf address and length values;
 *  - reset the pktmbuf data offset and data_len to their default values.
 * All other fields of the given packet mbuf are left intact.
 */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len, priv_size;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
	m->data_len = 0;
	m->ol_flags = 0;
}

static inline struct rte_mbuf* __attribute__((always_inline))
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {

		/* if this is an indirect mbuf, then
		 *  - detach mbuf
		 *  - free attached mbuf segment
		 */
		if (RTE_MBUF_INDIRECT(m)) {
			struct rte_mbuf *md = rte_mbuf_from_indirect(m);
			rte_pktmbuf_detach(m);
			if (rte_mbuf_refcnt_update(md, -1) == 0)
				__rte_mbuf_raw_free(md);
		}
		return m;
	}
	return NULL;
}

/**
 * Free a segment of a packet mbuf into its original mempool, without
 * parsing other segments in case of chained buffers.
 */
static inline void __attribute__((always_inline))
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
		m->next = NULL;
		__rte_mbuf_raw_free(m);
	}
}

/**
 * Free a packet mbuf back into its original mempool: the mbuf and all its
 * segments, in case of chained buffers.
 */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

/**
 * Creates a "clone" of the given packet mbuf.
 *
 * Walks through all segments of the given packet mbuf and, for each of
 * them, creates a new packet mbuf from the given pool and attaches it to
 * the segment. Then pkt_len and nb_segs of the clone are updated to match
 * the original packet mbuf.
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
		struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint8_t nseg;

	if (unlikely((mc = rte_pktmbuf_alloc(mp)) == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
		(mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely(mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}
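
/*
 * Illustrative sketch: transmit one packet on two ports without copying the
 * payload; the clone references the same data through indirect mbufs.
 * rte_eth_tx_burst() comes from rte_ethdev.h; the ports, queue 0 and
 * "clone_pool" are example values.
 *
 *   struct rte_mbuf *copy = rte_pktmbuf_clone(m, clone_pool);
 *   if (copy != NULL) {
 *       rte_eth_tx_burst(port0, 0, &m, 1);
 *       rte_eth_tx_burst(port1, 0, &copy, 1);
 *   }
 */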

/**
 * Adds the given value to the refcnt of all packet mbuf segments.
 */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

/**
 * Get the headroom in a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return m->data_off;
}

/**
 * Get the tailroom of a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

/**
 * Get the last segment of the packet.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	struct rte_mbuf *m2 = (struct rte_mbuf *)m;

	__rte_mbuf_sanity_check(m, 1);
	while (m2->next != NULL)
		m2 = m2->next;
	return m2;
}

/**
 * A macro that points to an offset into the data in the mbuf.
 * The returned pointer is cast to type t.
 */
#define rte_pktmbuf_mtod_offset(m, t, o) \
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/**
 * A macro that points to the start of the data in the mbuf.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
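
/*
 * Illustrative sketch: reading headers from the packet data. The struct
 * names come from rte_ether.h and rte_ip.h.
 *
 *   struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
 *   struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
 *                                                 sizeof(struct ether_hdr));
 */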

/**
 * A macro that returns the physical address of an offset into the packet
 * data in the mbuf.
 */
#define rte_pktmbuf_mtophys_offset(m, o) \
	(phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))

/**
 * A macro that returns the physical address of the start of the packet
 * data in the mbuf.
 */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)

/**
 * A macro that returns the length of the packet (sum of the data_len of
 * all segments).
 */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/**
 * A macro that returns the length of the segment.
 */
#define rte_pktmbuf_data_len(m) ((m)->data_len)

/**
 * Prepend len bytes to an mbuf data area, returning a pointer to the new
 * data start address, or NULL if there is not enough headroom.
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	m->data_off -= len;
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len  = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

/**
 * Append len bytes to an mbuf, returning a pointer to the start address of
 * the added data, or NULL if there is not enough tailroom in the last
 * segment.
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len  = (m->pkt_len + len);
	return (char *)tail;
}

/**
 * Remove len bytes at the beginning of an mbuf, returning a pointer to the
 * new data start, or NULL if len is greater than the length of the first
 * segment.
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off += len;
	m->pkt_len  = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

/**
 * Remove len bytes of data at the end of the mbuf. Returns 0 on success,
 * or -1 if len is greater than the length of the last segment.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len  = (m->pkt_len - len);
	return 0;
}
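
/*
 * Illustrative sketch: decapsulation with adj/trim, stripping an Ethernet
 * header from the front and a 4-byte FCS from the tail of a packet
 * (struct ether_hdr comes from rte_ether.h).
 *
 *   if (rte_pktmbuf_adj(m, sizeof(struct ether_hdr)) == NULL ||
 *       rte_pktmbuf_trim(m, 4) < 0)
 *       rte_pktmbuf_free(m); // packet was shorter than expected
 */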

/**
 * Test if mbuf data is contiguous (i.e. with only one segment).
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return !!(m->nb_segs == 1);
}

/**
 * Chain an mbuf to another, thereby creating a segmented packet. Returns 0
 * on success, or -EOVERFLOW if the segment count would overflow nb_segs.
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8))
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length */
	head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}
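
/*
 * Illustrative sketch: building a two-segment packet from two mbufs that
 * each already carry data.
 *
 *   if (rte_pktmbuf_chain(first, second) == 0) {
 *       // first->nb_segs is now 2 and first->pkt_len covers both segments
 *   } else {
 *       rte_pktmbuf_free(second); // would overflow nb_segs
 *   }
 */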

/**
 * Dump an mbuf structure to a file. If dump_len != 0, also dump the
 * dump_len first data bytes of the packet.
 */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */