DPDK 18.02.2
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf_ptype.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * Packet Offload Features Flags. They also carry packet type information.
 * These are critical resources: both RX and TX share these bits, so be
 * cautious about any change.
 *
 * - RX flags start at bit position zero, and get added to the left of
 *   previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags
 *   get added to the right of the previously defined flags, i.e. they
 *   should count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */
/** RX packet is a 802.1q VLAN packet; the tci is saved in mbuf->vlan_tci. */
#define PKT_RX_VLAN          (1ULL << 0)

#define PKT_RX_RSS_HASH      (1ULL << 1)  /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR          (1ULL << 2)  /**< RX packet with FDIR match indicate. */

/** Deprecated: check the 2 bits of PKT_RX_L4_CKSUM_MASK instead. */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)

/** Deprecated: check the 2 bits of PKT_RX_IP_CKSUM_MASK instead. */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)

/** External IP header checksum error. */
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)

/** A vlan has been stripped by the hardware and its tci is saved in
 * mbuf->vlan_tci. This can only happen if vlan stripping is enabled in the
 * RX configuration of the PMD.
 */
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

/** Mask of bits used to determine the status of the RX IP checksum:
 * UNKNOWN (no information), BAD (checksum wrong), GOOD (checksum valid),
 * NONE (checksum not correct in the data, but IP header integrity verified).
 */
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))

/** Mask of bits used to determine the status of the RX L4 checksum:
 * UNKNOWN (no information), BAD (checksum wrong), GOOD (checksum valid),
 * NONE (checksum not correct in the data, but L4 data integrity verified).
 */
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))
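
/* Illustrative sketch, not part of the original header: the IP and L4
 * checksum statuses above are two-bit fields, so consumers should mask
 * ol_flags and compare against the UNKNOWN/BAD/GOOD/NONE values rather
 * than test a single bit. The helper name is hypothetical.
 */
static inline int
example_rx_cksum_ok(uint64_t ol_flags)
{
	uint64_t ip = ol_flags & PKT_RX_IP_CKSUM_MASK;
	uint64_t l4 = ol_flags & PKT_RX_L4_CKSUM_MASK;

	/* Treat UNKNOWN as acceptable: the hardware did not check it. */
	return (ip == PKT_RX_IP_CKSUM_GOOD || ip == PKT_RX_IP_CKSUM_UNKNOWN) &&
	       (l4 == PKT_RX_L4_CKSUM_GOOD || l4 == PKT_RX_L4_CKSUM_UNKNOWN);
}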
#define PKT_RX_IEEE1588_PTP  (1ULL << 9)  /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet. */
#define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */

/** The 2 vlans have been stripped by the hardware and their tci are saved
 * in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
 */
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

/** When packets are coalesced by a hardware or virtual driver, this flag
 * can be set in the RX mbuf, meaning that the m->tso_segsz field is valid
 * and is set to the segment size of the original packets.
 */
#define PKT_RX_LRO           (1ULL << 16)

/** Indicate that the timestamp field in the mbuf is valid. */
#define PKT_RX_TIMESTAMP     (1ULL << 17)

/** Indicate that security offload processing was applied on the RX packet. */
#define PKT_RX_SEC_OFFLOAD   (1ULL << 18)

/** Indicate that security offload processing failed on the RX packet. */
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

/** The RX packet is a double VLAN, and the outer tci has been saved in
 * mbuf->vlan_tci_outer.
 */
#define PKT_RX_QINQ          (1ULL << 20)

/* add new RX flags here */

/* add new TX flags here */

/** UDP Fragmentation Offload segmentation flag. */
#define PKT_TX_UDP_SEG       (1ULL << 42)

/** Request security offload processing on the TX packet. */
#define PKT_TX_SEC_OFFLOAD   (1ULL << 43)

/** Offload the MACsec. This flag must be set by the application to enable
 * this offload feature for a packet to be transmitted.
 */
#define PKT_TX_MACSEC        (1ULL << 44)

/** Bits 45:48 are used for the tunnel type when requesting tunnel offloads. */
#define PKT_TX_TUNNEL_VXLAN     (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE       (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP      (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE    (0x4ULL << 45)
/** TX packet with MPLS-in-UDP RFC 7510 header. */
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK      (0xFULL << 45)

/** Second VLAN insertion (QinQ) flag. */
#define PKT_TX_QINQ          (1ULL << 49)
/* this old name is deprecated */
#define PKT_TX_QINQ_PKT      PKT_TX_QINQ

/** TCP segmentation offload. To use it, the application must set this flag
 * (it implies PKT_TX_TCP_CKSUM), set PKT_TX_IPV4 or PKT_TX_IPV6, fill
 * l2_len, l3_len, l4_len and tso_segsz in the mbuf, and write the TCP
 * pseudo-header checksum (computed without the IP payload length) into
 * the packet.
 */
#define PKT_TX_TCP_SEG       (1ULL << 50)

#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */

/** Bits 52+53 select the L4 checksum offload: 00 none, 01 TCP, 10 SCTP,
 * 11 UDP. The application must also fill l2_len and l3_len in the mbuf
 * and set PKT_TX_IPV4 or PKT_TX_IPV6.
 */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52)
#define PKT_TX_TCP_CKSUM     (1ULL << 52)
#define PKT_TX_SCTP_CKSUM    (2ULL << 52)
#define PKT_TX_UDP_CKSUM     (3ULL << 52)
#define PKT_TX_L4_MASK       (3ULL << 52)

/** Offload the IP checksum in the hardware. The application must set the
 * IP checksum field in the packet to 0 and fill l2_len and l3_len.
 */
#define PKT_TX_IP_CKSUM      (1ULL << 54)

/** Packet is IPv4. Must be set when using any offload feature (TSO, L3 or
 * L4 checksum). For tunneled packets it refers to the inner headers.
 */
#define PKT_TX_IPV4          (1ULL << 55)

/** Packet is IPv6. Must be set when using an offload feature (TSO or L4
 * checksum). For tunneled packets it refers to the inner headers.
 */
#define PKT_TX_IPV6          (1ULL << 56)

/** TX packet is a 802.1q VLAN packet. */
#define PKT_TX_VLAN          (1ULL << 57)
/* this old name is deprecated */
#define PKT_TX_VLAN_PKT      PKT_TX_VLAN

/** Offload the outer IP checksum in the hardware. The application must set
 * the outer IP checksum field to 0 and fill outer_l2_len and outer_l3_len.
 */
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)

/** Packet outer header is IPv4. Must be set when using any outer offload
 * feature on a tunneled packet.
 */
#define PKT_TX_OUTER_IPV4    (1ULL << 59)

/** Packet outer header is IPv6. Must be set when using any outer offload
 * feature on a tunneled packet.
 */
#define PKT_TX_OUTER_IPV6    (1ULL << 60)

/** Bitmask of all supported packet TX offload features flags. */
#define PKT_TX_OFFLOAD_MASK (    \
		PKT_TX_IP_CKSUM |        \
		PKT_TX_L4_MASK |         \
		PKT_TX_OUTER_IP_CKSUM |  \
		PKT_TX_TCP_SEG |         \
		PKT_TX_IEEE1588_TMST |   \
		PKT_TX_QINQ_PKT |        \
		PKT_TX_VLAN_PKT |        \
		PKT_TX_TUNNEL_MASK |     \
		PKT_TX_MACSEC |          \
		PKT_TX_SEC_OFFLOAD)
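
/* Illustrative sketch, not part of the original header: composing TX
 * offload flags for an IPv4/TCP packet whose IP and TCP checksums should
 * be computed by the NIC. The helper name is hypothetical; the caller
 * must also fill m->l2_len and m->l3_len (defined further below) and
 * zero the checksum fields in the packet itself.
 */
static inline uint64_t
example_tx_ipv4_tcp_cksum_flags(void)
{
	return PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}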
#define __RESERVED           (1ULL << 61) /**< reserved for future mbuf use */

#define IND_ATTACHED_MBUF    (1ULL << 62) /**< Indirect attached mbuf */

/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG       (1ULL << 63) /**< Mbuf contains control data */

/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8
/** Get the name of a RX offload flag. */
const char *rte_get_rx_ol_flag_name(uint64_t mask);

/** Dump the list of RX offload flags in a buffer. */
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/** Get the name of a TX offload flag. */
const char *rte_get_tx_ol_flag_name(uint64_t mask);

/** Dump the list of TX offload flags in a buffer. */
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/** Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
 * without splitting it into multiple segments.
 */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
/* define a set of marker types that can be used to refer to set points in
 * the mbuf. These are zero-length arrays and therefore occupy no space. */
__extension__
typedef void    *MARKER[0];   /**< generic marker for a point in a structure */
__extension__
typedef uint8_t  MARKER8[0];  /**< generic marker with 1B alignment */
__extension__
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
                               * with a single assignment */
/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
	MARKER cacheline0;

	void *buf_addr;           /**< Virtual address of segment buffer. */
	/**
	 * Physical address of segment buffer. Force alignment to 8 bytes,
	 * so as to ensure we have the exact same mbuf cacheline0 layout
	 * for 32-bit and 64-bit.
	 */
	RTE_STD_C11
	union {
		rte_iova_t buf_iova;
		rte_iova_t buf_physaddr; /**< deprecated */
	} __rte_aligned(sizeof(rte_iova_t));

	/* next 8 bytes are initialised on RX descriptor rearm */
	MARKER64 rearm_data;
	uint16_t data_off;

	/**
	 * Reference counter. Its size should at least equal the size of
	 * the port field (16 bits), to support zero-copy broadcast. It
	 * should only be accessed via rte_mbuf_refcnt_update(),
	 * rte_mbuf_refcnt_read() and rte_mbuf_refcnt_set().
	 */
	RTE_STD_C11
	union {
		rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
		uint16_t refcnt;              /**< Non-atomically accessed refcnt */
	};
	uint16_t nb_segs;         /**< Number of segments. */

	/** Input port (16 bits to support more than 256 virtual ports). */
	uint16_t port;

	uint64_t ol_flags;        /**< Offload features. */

	/* remaining bytes are set on RX when pulling packet from descriptor */
	MARKER rx_descriptor_fields1;

	/*
	 * The packet type, which is the combination of outer/inner L2, L3, L4
	 * and tunnel types. The packet_type is about data really present in the
	 * mbuf. Example: if vlan stripping is enabled, a received vlan packet
	 * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
	 * vlan is stripped from the data.
	 */
	RTE_STD_C11
	union {
		uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
		struct {
			uint32_t l2_type:4;  /**< (Outer) L2 type. */
			uint32_t l3_type:4;  /**< (Outer) L3 type. */
			uint32_t l4_type:4;  /**< (Outer) L4 type. */
			uint32_t tun_type:4; /**< Tunnel type. */
			RTE_STD_C11
			union {
				uint8_t inner_esp_next_proto;
				/**< ESP next protocol type, valid if
				 * RTE_PTYPE_TUNNEL_ESP tunnel type is set
				 * on both TX and RX.
				 */
				__extension__
				struct {
					uint8_t inner_l2_type:4; /**< Inner L2 type. */
					uint8_t inner_l3_type:4; /**< Inner L3 type. */
				};
			};
			uint32_t inner_l4_type:4; /**< Inner L4 type. */
		};
	};

	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
	uint16_t data_len;        /**< Amount of data in segment buffer. */
	/** VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */
	uint16_t vlan_tci;

	union {
		uint32_t rss;     /**< RSS hash result if RSS enabled */
		struct {
			RTE_STD_C11
			union {
				struct {
					uint16_t hash;
					uint16_t id;
				};
				uint32_t lo;
				/**< Second 4 flexible bytes */
			};
			uint32_t hi;
			/**< First 4 flexible bytes or FD ID, dependent on
			 * PKT_RX_FDIR_* flag in ol_flags.
			 */
		} fdir;           /**< Filter identifier if FDIR enabled */
		struct {
			uint32_t lo;
			uint32_t hi;
		} sched;          /**< Hierarchical scheduler */
		uint32_t usr;     /**< User defined tags. See rte_distributor_process() */
	} hash;                   /**< hash information */

	/** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
	uint16_t vlan_tci_outer;

	uint16_t buf_len;         /**< Length of segment buffer. */

	/** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
	 * are not normalized but are always the same for a given port.
	 */
	uint64_t timestamp;

	/* second cache line - fields only used in slow path or on TX */
	MARKER cacheline1 __rte_cache_min_aligned;

	RTE_STD_C11
	union {
		void *userdata;   /**< Can be used for external metadata */
		uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
	};

	struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
	struct rte_mbuf *next;    /**< Next segment of scattered packet. */

	/* fields to support TX offloads */
	RTE_STD_C11
	union {
		uint64_t tx_offload;       /**< combined for easy fetch */
		__extension__
		struct {
			uint64_t l2_len:7;
			/**< L2 (MAC) Header Length for non-tunneling pkt.
			 * Outer L2+L3+L4 length for tunneling pkt.
			 */
			uint64_t l3_len:9;     /**< L3 (IP) Header Length. */
			uint64_t l4_len:8;     /**< L4 (TCP/UDP) Header Length. */
			uint64_t tso_segsz:16; /**< TCP TSO segment size */

			/* fields for TX offloading of tunnels */
			uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
			uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */

			/* uint64_t unused:8; */
		};
	};

	/** Size of the application private data. In case of an indirect
	 * mbuf, it stores the direct mbuf private data size.
	 */
	uint16_t priv_size;

	/** Timesync flags for use with IEEE1588. */
	uint16_t timesync;

	/** Sequence number. See also rte_reorder_insert(). */
	uint32_t seqn;

} __rte_cache_aligned;
/** Maximum number of nb_segs allowed. */
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

/**
 * Prefetch the first part of the mbuf (the part used on the RX fast path).
 */
static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(&m->cacheline0);
}

/**
 * Prefetch the second part of the mbuf (the part mostly used on TX or in
 * the slow path).
 */
static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

/**
 * Return the IO address of the beginning of the mbuf data.
 */
static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova(mb);
}

/**
 * Return the default IO address of the beginning of the mbuf data, i.e.
 * buf_iova plus the mandatory headroom.
 */
static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova_default(mb);
}

/**
 * Return the mbuf owning the data buffer address of an indirect mbuf.
 */
static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

/**
 * Return the buffer address embedded in the given mbuf.
 */
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
	char *buffer_addr;
	buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
	return buffer_addr;
}
/**
 * Returns TRUE if the given mbuf is indirect, or FALSE otherwise.
 */
#define RTE_MBUF_INDIRECT(mb)   ((mb)->ol_flags & IND_ATTACHED_MBUF)

/**
 * Returns TRUE if the given mbuf is direct, or FALSE otherwise.
 */
#define RTE_MBUF_DIRECT(mb)     (!RTE_MBUF_INDIRECT(mb))

/**
 * Private data in case of pktmbuf pool. Appended after the mempool
 * structure (in private data).
 */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

/**
 * Sets an mbuf's refcnt to a defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

/**
 * Adds the given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		++value;
		rte_mbuf_refcnt_set(m, (uint16_t)value);
		return (uint16_t)value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

/**
 * Adds the given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

/**
 * Sets an mbuf's refcnt to the defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
	if ((m) != NULL)                        \
		rte_prefetch0(m);               \
} while (0)


/**
 * Sanity checks on an mbuf: panic if it fails.
 */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

#define MBUF_RAW_ALLOC_CHECK(m) do {                    \
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);       \
	RTE_ASSERT((m)->next == NULL);                  \
	RTE_ASSERT((m)->nb_segs == 1);                  \
	__rte_mbuf_sanity_check(m, 0);                  \
} while (0)

/**
 * Allocate an uninitialized mbuf from mempool *mp*. The refcnt is 1.
 */
static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if (rte_mempool_get(mp, (void **)&m) < 0)
		return NULL;
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

/**
 * Put an mbuf back into its original mempool. The caller must ensure the
 * mbuf is direct and properly reinitialized (refcnt of 1, next == NULL,
 * nb_segs == 1).
 */
static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(m));
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	rte_mempool_put(m->pool, m);
}

/* compat with older versions */
__rte_deprecated
static inline void
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
	rte_mbuf_raw_free(m);
}

/* Operations on ctrl mbuf */

/**
 * The control mbuf constructor. Initializes the fields of the mbuf
 * structure that are not modified by the user once created (origin pool,
 * buffer start address, and so on). Given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
 */
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);

/**
 * Allocate a new mbuf (type is ctrl) from mempool *mp*.
 */
#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)

/**
 * Free a control mbuf back into its original mempool.
 */
#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)

/**
 * A macro that returns the pointer to the carried data.
 */
#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)

/**
 * A macro that returns the length of the carried data.
 */
#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)

/**
 * Tests if an mbuf is a control mbuf.
 */
static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
	return !!(m->ol_flags & CTRL_MBUF_FLAG);
}

/* Operations on pkt mbuf */

/**
 * The packet mbuf constructor. Initializes the fields of the mbuf
 * structure that are not modified by the user once created (origin pool,
 * buffer start address, and so on). Given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
 */
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);


/**
 * A packet mbuf pool constructor. Initializes the mempool private data
 * (data room size and private size).
 */
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

/**
 * Create an mbuf pool. This function creates and initializes a packet
 * mbuf pool; it is a wrapper around the rte_mempool functions.
 */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

/**
 * Create an mbuf pool with a given mempool ops name.
 */
struct rte_mempool * __rte_experimental
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

/**
 * Get the data room size of mbufs stored in a pktmbuf pool, i.e. the
 * amount of data that can be stored in an mbuf including the headroom
 * (RTE_PKTMBUF_HEADROOM).
 */
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

/**
 * Get the application private size of mbufs stored in a pktmbuf pool.
 * The private area is a zone located between the rte_mbuf structure and
 * the data buffer where an application can store per-packet data.
 */
static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

/**
 * Reset the data_off field of a packet mbuf to its default value.
 * The given mbuf must have only one segment, which should be empty.
 */
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
					(uint16_t)m->buf_len);
}

/** A macro indicating an invalid input port. */
#define MBUF_INVALID_PORT UINT16_MAX

/**
 * Reset the fields of a packet mbuf to their default values.
 * The given mbuf must have only one segment.
 */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;

	m->ol_flags = 0;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

/**
 * Allocate a new mbuf from a mempool. The mbuf is reset: one segment,
 * length zero, headroom reset to its default value.
 */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}
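
/* Illustrative sketch, not part of the original header: a typical
 * allocation path. The pool name, count of 8191 mbufs, cache of 256 and
 * zero private size are arbitrary choices for this sketch.
 */
static inline struct rte_mbuf *
example_alloc_from_new_pool(int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("example_pool", 8191, 256, 0,
		RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return NULL;

	return rte_pktmbuf_alloc(mp);
}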

/**
 * Allocate a bulk of mbufs, initialize refcnt and reset the fields to
 * their default values.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand Duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here a while() loop is used rather than do {} while() to avoid an
	 * extra check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}
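
/* Illustrative sketch, not part of the original header: allocation via
 * rte_pktmbuf_alloc_bulk() is all-or-nothing, so a failure needs no
 * partial cleanup. The helper name is hypothetical.
 */
static inline int
example_fill_burst(struct rte_mempool *mp, struct rte_mbuf **pkts, unsigned n)
{
	/* Either all n pointers are valid afterwards, or none are. */
	if (rte_pktmbuf_alloc_bulk(mp, pkts, n) != 0)
		return -1; /* pool exhausted: nothing was allocated */
	return 0;
}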

/**
 * Attach a packet mbuf to another packet mbuf.
 *
 * After attachment we refer to the mbuf we attached as 'indirect', and
 * to the mbuf we attached to as 'direct'. The direct mbuf's reference
 * counter is incremented.
 *
 * Currently not supported:
 *  - attaching an already indirect mbuf (mi has to be direct);
 *  - attaching an mbuf (mi) used by someone else, i.e. whose reference
 *    counter is greater than 1.
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	/* if m is not direct, get the mbuf that embeds the data */
	if (RTE_MBUF_DIRECT(m))
		md = m;
	else
		md = rte_mbuf_from_indirect(m);

	rte_mbuf_refcnt_update(md, 1);
	mi->priv_size = m->priv_size;
	mi->buf_iova = m->buf_iova;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->port = m->port;
	mi->vlan_tci = m->vlan_tci;
	mi->vlan_tci_outer = m->vlan_tci_outer;
	mi->tx_offload = m->tx_offload;
	mi->hash = m->hash;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;
	mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	mi->packet_type = m->packet_type;
	mi->timestamp = m->timestamp;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 1);
}

/**
 * Detach an indirect packet mbuf.
 *
 *  - restore original mbuf address and length values;
 *  - reset pktmbuf data and data_len to their default values;
 *  - decrement the direct mbuf's reference counter; when it reaches 0,
 *    the direct mbuf is freed.
 *
 * All other fields of the given packet mbuf are left intact.
 */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mbuf *md = rte_mbuf_from_indirect(m);
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	uint16_t priv_size;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

/**
 * Decrease the reference counter and unlink an mbuf segment.
 *
 * This function does the same as a free, except that it does not return
 * the segment to its pool. It decreases the reference counter, and if it
 * reaches 0, an indirect mbuf is detached from its parent.
 */
static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_read(m) == 1)) {

		if (RTE_MBUF_INDIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}

		return m;

	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

		if (RTE_MBUF_INDIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}
		rte_mbuf_refcnt_set(m, 1);

		return m;
	}
	return NULL;
}

/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
__rte_deprecated
static inline struct rte_mbuf *
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	return rte_pktmbuf_prefree_seg(m);
}

/**
 * Free a segment of a packet mbuf into its original mempool, without
 * parsing other segments in case of chained buffers.
 */
static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL))
		rte_mbuf_raw_free(m);
}

/**
 * Free a packet mbuf back into its original mempool, including all its
 * segments in case of chained buffers.
 */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)
		__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

/**
 * Create a "clone" of the given packet mbuf.
 *
 * Walks through all segments of the given packet mbuf and, for each of
 * them, creates a new packet mbuf from the given pool and attaches it to
 * the segment. Then pkt_len and nb_segs of the clone are updated to match
 * the original packet mbuf.
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
		struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	if (unlikely((mc = rte_pktmbuf_alloc(mp)) == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely(mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}

/**
 * Adds the given value to the refcnt of all packet mbuf segments.
 */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

/**
 * Get the headroom in a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

/**
 * Get the tailroom of a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

/**
 * Get the last segment of the packet.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	while (m->next != NULL)
		m = m->next;
	return m;
}

/**
 * A macro that points to an offset into the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this macro, the
 * user must ensure that the first segment is large enough to accommodate
 * its data.
 */
#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/**
 * A macro that points to the start of the data in the mbuf.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
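
/* Illustrative sketch, not part of the original header: locating a header
 * inside the first segment. The offset of 14 assumes an untagged Ethernet
 * frame; real code should parse the frame or consult m->packet_type, and
 * must ensure the headers fit in the first segment. The helper name is
 * hypothetical.
 */
static inline void *
example_ipv4_header(struct rte_mbuf *m)
{
	return rte_pktmbuf_mtod_offset(m, void *, 14);
}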

/**
 * A macro that returns the IO address that points to an offset of the
 * start of the data in the mbuf.
 */
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

/**
 * A macro that returns the IO address that points to the start of the
 * data in the mbuf.
 */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

/**
 * A macro that returns the length of the packet (sum of all segments).
 */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/**
 * A macro that returns the length of the segment.
 */
#define rte_pktmbuf_data_len(m) ((m)->data_len)

/**
 * Prepend len bytes to an mbuf data area.
 *
 * Returns a pointer to the new data start address. If there is not
 * enough headroom in the first segment, the function returns NULL,
 * without modifying the mbuf.
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	/* NB: elaborating the subtraction like this instead of using
	 * -= allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_off = (uint16_t)(m->data_off - len);
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

/**
 * Append len bytes to an mbuf.
 *
 * Returns a pointer to the start address of the added data. If there is
 * not enough tailroom in the last segment, the function returns NULL,
 * without modifying the mbuf.
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len = (m->pkt_len + len);
	return (char *)tail;
}

/**
 * Remove len bytes at the beginning of an mbuf.
 *
 * Returns a pointer to the start address of the new data area. If the
 * length is greater than the length of the first segment, the function
 * fails and returns NULL, without modifying the mbuf.
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	/* NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off = (uint16_t)(m->data_off + len);
	m->pkt_len = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

/**
 * Remove len bytes of data at the end of the mbuf.
 *
 * If the length is greater than the length of the last segment, the
 * function fails and returns -1, without modifying the mbuf.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len = (m->pkt_len - len);
	return 0;
}
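
/* Illustrative sketch, not part of the original header: stripping an
 * 8-byte outer header from the front of a packet and a 4-byte trailer
 * from the tail, checking each step. The helper name and the sizes are
 * hypothetical.
 */
static inline int
example_strip(struct rte_mbuf *m)
{
	if (rte_pktmbuf_adj(m, 8) == NULL)  /* drop 8 bytes at the front */
		return -1;
	if (rte_pktmbuf_trim(m, 4) != 0)    /* drop 4 bytes at the tail */
		return -1;
	return 0;
}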
/**
 * Test if mbuf data is contiguous (i.e. with only one segment).
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return !!(m->nb_segs == 1);
}

/**
 * @internal used by rte_pktmbuf_read().
 */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

/**
 * Read len data bytes in an mbuf at the specified offset.
 *
 * If the data is contiguous, return a pointer into the mbuf data;
 * otherwise copy the data into the buffer provided by the user and
 * return its pointer. Returns NULL if the offset plus the length is out
 * of the mbuf data.
 */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}
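
/* Illustrative sketch, not part of the original header: safely reading 4
 * bytes that may straddle a segment boundary. 'buf' provides the backing
 * storage for the copied-out case; the helper name is hypothetical.
 */
static inline const void *
example_peek4(const struct rte_mbuf *m, uint32_t off, uint32_t buf[1])
{
	/* Returns a pointer either into the mbuf or into 'buf', or NULL
	 * if off + 4 exceeds the packet length. */
	return rte_pktmbuf_read(m, off, 4, buf);
}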

/**
 * Chain an mbuf to another, thereby creating a segmented packet.
 *
 * Note: the implementation does a linear walk over the segments to find
 * the tail entry. For cases with many segments, it is better to chain
 * the entries manually. Returns 0 on success, or -EOVERFLOW if the
 * segment limit would be exceeded.
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}
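
/* Illustrative sketch, not part of the original header: appending a
 * freshly allocated segment to an existing packet. If chaining fails,
 * the caller still owns 'tail' and must free it. The helper name is
 * hypothetical.
 */
static inline int
example_add_segment(struct rte_mbuf *head, struct rte_mempool *mp)
{
	struct rte_mbuf *tail = rte_pktmbuf_alloc(mp);

	if (tail == NULL)
		return -1;
	if (rte_pktmbuf_chain(head, tail) != 0) {
		rte_pktmbuf_free(tail);
		return -1;
	}
	return 0;
}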
/**
 * Validate general requirements for TX offload in mbuf.
 *
 * This function checks the correctness and completeness of the TX offload
 * settings. Returns 0 if they are valid, or a negative errno otherwise.
 */
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	uint64_t inner_l3_offset = m->l2_len;

	/* Does packet set any of available offloads? */
	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
		return 0;

	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
		/* NB: elaborating the addition like this instead of using
		 * += gives the result uint64_t type instead of int,
		 * avoiding compiler warnings on gcc 8.1 at least */
		inner_l3_offset = inner_l3_offset + m->outer_l2_len +
				  m->outer_l3_len;

	/* Headers are fragmented */
	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
		return -ENOTSUP;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & PKT_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
				((ol_flags & PKT_TX_IPV4) &&
				!(ol_flags & PKT_TX_IP_CKSUM)))
			return -EINVAL;

	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
			!(ol_flags & PKT_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}

/**
 * Linearize data in an mbuf.
 *
 * Moves the mbuf data into the first segment if there is enough tailroom.
 * The subsequent segments are unchained and freed. Returns 0 on success,
 * or -1 if the packet cannot be linearized.
 */
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	size_t seg_len, copy_len;
	struct rte_mbuf *m;
	struct rte_mbuf *m_next;
	char *buffer;

	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;

	/* Extend first segment to the total packet length */
	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
		return -1;

	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
	mbuf->data_len = (uint16_t)(mbuf->pkt_len);

	/* Append data from next segments to the first one */
	m = mbuf->next;
	while (m != NULL) {
		m_next = m->next;

		seg_len = rte_pktmbuf_data_len(m);
		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
		buffer += seg_len;

		rte_pktmbuf_free_seg(m);
		m = m_next;
	}

	mbuf->next = NULL;
	mbuf->nb_segs = 1;

	return 0;
}
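
/* Illustrative sketch, not part of the original header: obtaining a
 * single contiguous view of a possibly segmented packet before handing
 * it to code that cannot walk segment chains. The helper name is
 * hypothetical.
 */
static inline void *
example_flat_view(struct rte_mbuf *m)
{
	/* Fails (returns NULL) when the first segment's tailroom cannot
	 * absorb the remaining segments. */
	if (rte_pktmbuf_linearize(m) != 0)
		return NULL;
	return rte_pktmbuf_mtod(m, void *);
}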
/**
 * Dump an mbuf structure to a file, including all fields of the given
 * packet mbuf and all its associated segments (in the case of a chained
 * buffer).
 */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */