DPDK  16.11.11
rte_mbuf.h
Go to the documentation of this file.
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  * Copyright 2014 6WIND S.A.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * * Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  * * Redistributions in binary form must reproduce the above copyright
15  * notice, this list of conditions and the following disclaimer in
16  * the documentation and/or other materials provided with the
17  * distribution.
18  * * Neither the name of Intel Corporation nor the names of its
19  * contributors may be used to endorse or promote products derived
20  * from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #ifndef _RTE_MBUF_H_
36 #define _RTE_MBUF_H_
37 
56 #include <stdint.h>
57 #include <rte_common.h>
58 #include <rte_mempool.h>
59 #include <rte_memory.h>
60 #include <rte_atomic.h>
61 #include <rte_prefetch.h>
62 #include <rte_branch_prediction.h>
63 #include <rte_mbuf_ptype.h>
64 
65 #ifdef __cplusplus
66 extern "C" {
67 #endif
68 
69 /*
70  * Packet Offload Features Flags. It also carry packet type information.
71  * Critical resources. Both rx/tx shared these bits. Be cautious on any change
72  *
73  * - RX flags start at bit position zero, and get added to the left of previous
74  * flags.
75  * - The most-significant 3 bits are reserved for generic mbuf flags
76  * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
77  * added to the right of the previously defined flags i.e. they should count
78  * downwards, not upwards.
79  *
80  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
81  * rte_get_tx_ol_flag_name().
82  */
83 
90 #define PKT_RX_VLAN_PKT (1ULL << 0)
91 
92 #define PKT_RX_RSS_HASH (1ULL << 1)
93 #define PKT_RX_FDIR (1ULL << 2)
102 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
103 
111 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
112 
113 #define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
120 #define PKT_RX_VLAN_STRIPPED (1ULL << 6)
121 
130 #define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
131 
132 #define PKT_RX_IP_CKSUM_UNKNOWN 0
133 #define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
134 #define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
135 #define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
136 
145 #define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
146 
147 #define PKT_RX_L4_CKSUM_UNKNOWN 0
148 #define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
149 #define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
150 #define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
151 
152 #define PKT_RX_IEEE1588_PTP (1ULL << 9)
153 #define PKT_RX_IEEE1588_TMST (1ULL << 10)
154 #define PKT_RX_FDIR_ID (1ULL << 13)
155 #define PKT_RX_FDIR_FLX (1ULL << 14)
164 #define PKT_RX_QINQ_STRIPPED (1ULL << 15)
165 
171 #define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
172 
178 #define PKT_RX_LRO (1ULL << 16)
179 
180 /* add new RX flags here */
181 
182 /* add new TX flags here */
183 
189 #define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
190 #define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
191 #define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
192 #define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
193 /* add new TX TUNNEL type here */
194 #define PKT_TX_TUNNEL_MASK (0xFULL << 45)
195 
199 #define PKT_TX_QINQ_PKT (1ULL << 49)
210 #define PKT_TX_TCP_SEG (1ULL << 50)
211 
212 #define PKT_TX_IEEE1588_TMST (1ULL << 51)
222 #define PKT_TX_L4_NO_CKSUM (0ULL << 52)
223 #define PKT_TX_TCP_CKSUM (1ULL << 52)
224 #define PKT_TX_SCTP_CKSUM (2ULL << 52)
225 #define PKT_TX_UDP_CKSUM (3ULL << 52)
226 #define PKT_TX_L4_MASK (3ULL << 52)
234 #define PKT_TX_IP_CKSUM (1ULL << 54)
235 
242 #define PKT_TX_IPV4 (1ULL << 55)
243 
250 #define PKT_TX_IPV6 (1ULL << 56)
251 
252 #define PKT_TX_VLAN_PKT (1ULL << 57)
260 #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
261 
267 #define PKT_TX_OUTER_IPV4 (1ULL << 59)
268 
274 #define PKT_TX_OUTER_IPV6 (1ULL << 60)
275 
276 #define __RESERVED (1ULL << 61)
278 #define IND_ATTACHED_MBUF (1ULL << 62)
280 /* Use final bit of flags to indicate a control mbuf */
281 #define CTRL_MBUF_FLAG (1ULL << 63)
284 #define RTE_MBUF_PRIV_ALIGN 8
285 
294 const char *rte_get_rx_ol_flag_name(uint64_t mask);
295 
308 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
309 
320 const char *rte_get_tx_ol_flag_name(uint64_t mask);
321 
334 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
335 
342 #define RTE_MBUF_DEFAULT_DATAROOM 2048
343 #define RTE_MBUF_DEFAULT_BUF_SIZE \
344  (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
345 
346 /* define a set of marker types that can be used to refer to set points in the
347  * mbuf */
348 __extension__
349 typedef void *MARKER[0];
350 __extension__
351 typedef uint8_t MARKER8[0];
352 __extension__
353 typedef uint64_t MARKER64[0];
359 struct rte_mbuf {
360  MARKER cacheline0;
361 
362  void *buf_addr;
365  uint16_t buf_len;
367  /* next 6 bytes are initialised on RX descriptor rearm */
368  MARKER8 rearm_data;
369  uint16_t data_off;
370 
380  union {
382  uint16_t refcnt;
383  };
384  uint8_t nb_segs;
385  uint8_t port;
387  uint64_t ol_flags;
389  /* remaining bytes are set on RX when pulling packet from descriptor */
390  MARKER rx_descriptor_fields1;
391 
392  /*
393  * The packet type, which is the combination of outer/inner L2, L3, L4
394  * and tunnel types. The packet_type is about data really present in the
395  * mbuf. Example: if vlan stripping is enabled, a received vlan packet
396  * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
397  * vlan is stripped from the data.
398  */
400  union {
401  uint32_t packet_type;
402  struct {
403  uint32_t l2_type:4;
404  uint32_t l3_type:4;
405  uint32_t l4_type:4;
406  uint32_t tun_type:4;
407  uint32_t inner_l2_type:4;
408  uint32_t inner_l3_type:4;
409  uint32_t inner_l4_type:4;
410  };
411  };
412 
413  uint32_t pkt_len;
414  uint16_t data_len;
416  uint16_t vlan_tci;
417 
418  union {
419  uint32_t rss;
420  struct {
422  union {
423  struct {
424  uint16_t hash;
425  uint16_t id;
426  };
427  uint32_t lo;
429  };
430  uint32_t hi;
433  } fdir;
434  struct {
435  uint32_t lo;
436  uint32_t hi;
437  } sched;
438  uint32_t usr;
439  } hash;
441  uint32_t seqn;
444  uint16_t vlan_tci_outer;
445 
446  /* second cache line - fields only used in slow path or on TX */
447  MARKER cacheline1 __rte_cache_min_aligned;
448 
450  union {
451  void *userdata;
452  uint64_t udata64;
453  };
454 
455  struct rte_mempool *pool;
456  struct rte_mbuf *next;
458  /* fields to support TX offloads */
460  union {
461  uint64_t tx_offload;
462  __extension__
463  struct {
464  uint64_t l2_len:7;
468  uint64_t l3_len:9;
469  uint64_t l4_len:8;
470  uint64_t tso_segsz:16;
472  /* fields for TX offloading of tunnels */
473  uint64_t outer_l3_len:9;
474  uint64_t outer_l2_len:7;
476  /* uint64_t unused:8; */
477  };
478  };
479 
482  uint16_t priv_size;
483 
485  uint16_t timesync;
487 
498 static inline void
500 {
501  rte_prefetch0(&m->cacheline0);
502 }
503 
/**
 * Prefetch the second part of the mbuf (second cache line).
 *
 * @param m  The mbuf to prefetch.
 *
 * (Declarator line restored from the cross-reference index,
 * rte_mbuf.h:516.)
 */
static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	/* on 128-byte cache lines part1 already fetched both halves */
	RTE_SET_USED(m);
#endif
}
524 
525 
526 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
527 
536 static inline phys_addr_t
538 {
539  return mb->buf_physaddr + mb->data_off;
540 }
541 
554 static inline phys_addr_t
556 {
557  return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
558 }
559 
568 static inline struct rte_mbuf *
570 {
571  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
572 }
573 
582 static inline char *
584 {
585  char *buffer_addr;
586  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
587  return buffer_addr;
588 }
589 
593 #define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
594 
598 #define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
599 
/** Private data carried in a pktmbuf mempool (read via
 * rte_mempool_get_priv()). The struct header and the data-room member
 * were lost in extraction; restored from the cross-reference index
 * (rte_mbuf.h:607) and the accessors below. */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};
610 
611 #ifdef RTE_LIBRTE_MBUF_DEBUG
612 
614 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
615 
616 #else /* RTE_LIBRTE_MBUF_DEBUG */
617 
619 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
620 
621 #endif /* RTE_LIBRTE_MBUF_DEBUG */
622 
623 #ifdef RTE_MBUF_REFCNT_ATOMIC
624 
632 static inline uint16_t
633 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
634 {
635  return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
636 }
637 
645 static inline void
646 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
647 {
648  rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
649 }
650 
660 static inline uint16_t
661 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
662 {
663  /*
664  * The atomic_add is an expensive operation, so we don't want to
665  * call it in the case where we know we are the uniq holder of
666  * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
667  * operation has to be used because concurrent accesses on the
668  * reference counter can occur.
669  */
670  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
671  ++value;
672  rte_mbuf_refcnt_set(m, (uint16_t)value);
673  return (uint16_t)value;
674  }
675 
676  return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
677 }
678 
679 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
680 
684 static inline uint16_t
685 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
686 {
687  m->refcnt = (uint16_t)(m->refcnt + value);
688  return m->refcnt;
689 }
690 
694 static inline uint16_t
696 {
697  return m->refcnt;
698 }
699 
703 static inline void
704 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
705 {
706  m->refcnt = new_value;
707 }
708 
709 #endif /* RTE_MBUF_REFCNT_ATOMIC */
710 
712 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
713  if ((m) != NULL) \
714  rte_prefetch0(m); \
715 } while (0)
716 
717 
730 void
731 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
732 
747 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
748 {
749  struct rte_mbuf *m;
750  void *mb = NULL;
751 
752  if (rte_mempool_get(mp, &mb) < 0)
753  return NULL;
754  m = (struct rte_mbuf *)mb;
755  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
756  rte_mbuf_refcnt_set(m, 1);
758 
759  return m;
760 }
761 
770 static inline void __attribute__((always_inline))
771 __rte_mbuf_raw_free(struct rte_mbuf *m)
772 {
773  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
774  rte_mempool_put(m->pool, m);
775 }
776 
777 /* Operations on ctrl mbuf */
778 
798 void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
799  void *m, unsigned i);
800 
813 #define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
814 
821 #define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)
822 
831 #define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
832 
841 #define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)
842 
852 static inline int
854 {
855  return !!(m->ol_flags & CTRL_MBUF_FLAG);
856 }
857 
858 /* Operations on pkt mbuf */
859 
879 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
880  void *m, unsigned i);
881 
882 
899 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
900 
936 struct rte_mempool *
937 rte_pktmbuf_pool_create(const char *name, unsigned n,
938  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
939  int socket_id);
940 
952 static inline uint16_t
954 {
955  struct rte_pktmbuf_pool_private *mbp_priv;
956 
957  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
958  return mbp_priv->mbuf_data_room_size;
959 }
960 
973 static inline uint16_t
975 {
976  struct rte_pktmbuf_pool_private *mbp_priv;
977 
978  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
979  return mbp_priv->mbuf_priv_size;
980 }
981 
990 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
991 {
992  m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
993  (uint16_t)m->buf_len);
994 }
995 
1004 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1005 {
1006  m->next = NULL;
1007  m->pkt_len = 0;
1008  m->tx_offload = 0;
1009  m->vlan_tci = 0;
1010  m->vlan_tci_outer = 0;
1011  m->nb_segs = 1;
1012  m->port = 0xff;
1013 
1014  m->ol_flags = 0;
1015  m->packet_type = 0;
1017 
1018  m->data_len = 0;
1020 }
1021 
1035 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1036 {
1037  struct rte_mbuf *m;
1038  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1039  rte_pktmbuf_reset(m);
1040  return m;
1041 }
1042 
/**
 * Allocate and initialise a batch of packet mbufs in one mempool call.
 *
 * @param pool   Mempool to draw raw mbufs from.
 * @param mbufs  Output array; filled with @count reset mbufs on success.
 * @param count  Number of mbufs requested (all-or-nothing).
 * @return       0 on success, or the rte_mempool_get_bulk() error code.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned i = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/*
	 * 4-way unrolled initialisation in Duff's-device form (see
	 * https://en.wikipedia.org/wiki/Duff's_device): the switch jumps
	 * into the middle of the while loop, every case falls through, and
	 * a count of zero never enters the loop body.
	 */
	switch (count % 4) {
	case 0:
		while (i != count) {
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[i]) == 0);
			rte_mbuf_refcnt_set(mbufs[i], 1);
			rte_pktmbuf_reset(mbufs[i]);
			i++;
			/* fallthrough */
	case 3:
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[i]) == 0);
			rte_mbuf_refcnt_set(mbufs[i], 1);
			rte_pktmbuf_reset(mbufs[i]);
			i++;
			/* fallthrough */
	case 2:
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[i]) == 0);
			rte_mbuf_refcnt_set(mbufs[i], 1);
			rte_pktmbuf_reset(mbufs[i]);
			i++;
			/* fallthrough */
	case 1:
			RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[i]) == 0);
			rte_mbuf_refcnt_set(mbufs[i], 1);
			rte_pktmbuf_reset(mbufs[i]);
			i++;
		}
	}
	return 0;
}
1098 
1116 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1117 {
1118  struct rte_mbuf *md;
1119 
1120  RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1121  rte_mbuf_refcnt_read(mi) == 1);
1122 
1123  /* if m is not direct, get the mbuf that embeds the data */
1124  if (RTE_MBUF_DIRECT(m))
1125  md = m;
1126  else
1127  md = rte_mbuf_from_indirect(m);
1128 
1129  rte_mbuf_refcnt_update(md, 1);
1130  mi->priv_size = m->priv_size;
1131  mi->buf_physaddr = m->buf_physaddr;
1132  mi->buf_addr = m->buf_addr;
1133  mi->buf_len = m->buf_len;
1134 
1135  mi->next = m->next;
1136  mi->data_off = m->data_off;
1137  mi->data_len = m->data_len;
1138  mi->port = m->port;
1139  mi->vlan_tci = m->vlan_tci;
1140  mi->vlan_tci_outer = m->vlan_tci_outer;
1141  mi->tx_offload = m->tx_offload;
1142  mi->hash = m->hash;
1143 
1144  mi->next = NULL;
1145  mi->pkt_len = mi->data_len;
1146  mi->nb_segs = 1;
1147  mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1148  mi->packet_type = m->packet_type;
1149 
1150  __rte_mbuf_sanity_check(mi, 1);
1152 }
1153 
1167 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1168 {
1169  struct rte_mbuf *md = rte_mbuf_from_indirect(m);
1170  struct rte_mempool *mp = m->pool;
1171  uint32_t mbuf_size, buf_len;
1172  uint16_t priv_size;
1173 
1174  priv_size = rte_pktmbuf_priv_size(mp);
1175  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1176  buf_len = rte_pktmbuf_data_room_size(mp);
1177 
1178  m->priv_size = priv_size;
1179  m->buf_addr = (char *)m + mbuf_size;
1180  m->buf_physaddr = rte_mempool_virt2phy(mp, m) + mbuf_size;
1181  m->buf_len = (uint16_t)buf_len;
1183  m->data_len = 0;
1184  m->ol_flags = 0;
1185 
1186  if (rte_mbuf_refcnt_update(md, -1) == 0)
1187  __rte_mbuf_raw_free(md);
1188 }
1189 
1190 static inline struct rte_mbuf* __attribute__((always_inline))
1191 __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
1192 {
1194 
1195  if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {
1196  /* if this is an indirect mbuf, it is detached. */
1197  if (RTE_MBUF_INDIRECT(m))
1198  rte_pktmbuf_detach(m);
1199  return m;
1200  }
1201  return NULL;
1202 }
1203 
1213 static inline void __attribute__((always_inline))
1215 {
1216  if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
1217  m->next = NULL;
1218  __rte_mbuf_raw_free(m);
1219  }
1220 }
1221 
1231 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1232 {
1233  struct rte_mbuf *m_next;
1234 
1235  if (m != NULL)
1237 
1238  while (m != NULL) {
1239  m_next = m->next;
1241  m = m_next;
1242  }
1243 }
1244 
1262 static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
1263  struct rte_mempool *mp)
1264 {
1265  struct rte_mbuf *mc, *mi, **prev;
1266  uint32_t pktlen;
1267  uint8_t nseg;
1268 
1269  if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
1270  return NULL;
1271 
1272  mi = mc;
1273  prev = &mi->next;
1274  pktlen = md->pkt_len;
1275  nseg = 0;
1276 
1277  do {
1278  nseg++;
1279  rte_pktmbuf_attach(mi, md);
1280  *prev = mi;
1281  prev = &mi->next;
1282  } while ((md = md->next) != NULL &&
1283  (mi = rte_pktmbuf_alloc(mp)) != NULL);
1284 
1285  *prev = NULL;
1286  mc->nb_segs = nseg;
1287  mc->pkt_len = pktlen;
1288 
1289  /* Allocation of new indirect segment failed */
1290  if (unlikely (mi == NULL)) {
1291  rte_pktmbuf_free(mc);
1292  return NULL;
1293  }
1294 
1295  __rte_mbuf_sanity_check(mc, 1);
1296  return mc;
1297 }
1298 
1310 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1311 {
1313 
1314  do {
1315  rte_mbuf_refcnt_update(m, v);
1316  } while ((m = m->next) != NULL);
1317 }
1318 
1327 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
1328 {
1330  return m->data_off;
1331 }
1332 
1341 static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
1342 {
1344  return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
1345  m->data_len);
1346 }
1347 
1356 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
1357 {
1359  while (m->next != NULL)
1360  m = m->next;
1361  return m;
1362 }
1363 
1378 #define rte_pktmbuf_mtod_offset(m, t, o) \
1379  ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
1380 
1393 #define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
1394 
1404 #define rte_pktmbuf_mtophys_offset(m, o) \
1405  (phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
1406 
1414 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
1415 
1424 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1425 
1434 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1435 
1451 static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
1452  uint16_t len)
1453 {
1455 
1456  if (unlikely(len > rte_pktmbuf_headroom(m)))
1457  return NULL;
1458 
1459  /* NB: elaborating the subtraction like this instead of using
1460  * -= allows us to ensure the result type is uint16_t
1461  * avoiding compiler warnings on gcc 8.1 at least */
1462  m->data_off = (uint16_t)(m->data_off - len);
1463  m->data_len = (uint16_t)(m->data_len + len);
1464  m->pkt_len = (m->pkt_len + len);
1465 
1466  return (char *)m->buf_addr + m->data_off;
1467 }
1468 
1484 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
1485 {
1486  void *tail;
1487  struct rte_mbuf *m_last;
1488 
1490 
1491  m_last = rte_pktmbuf_lastseg(m);
1492  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
1493  return NULL;
1494 
1495  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
1496  m_last->data_len = (uint16_t)(m_last->data_len + len);
1497  m->pkt_len = (m->pkt_len + len);
1498  return (char*) tail;
1499 }
1500 
1515 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1516 {
1518 
1519  if (unlikely(len > m->data_len))
1520  return NULL;
1521 
1522  /* NB: elaborating the addition like this instead of using
1523  * += allows us to ensure the result type is uint16_t
1524  * avoiding compiler warnings on gcc 8.1 at least */
1525  m->data_len = (uint16_t)(m->data_len - len);
1526  m->data_off = (uint16_t)(m->data_off + len);
1527  m->pkt_len = (m->pkt_len - len);
1528  return (char *)m->buf_addr + m->data_off;
1529 }
1530 
1545 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
1546 {
1547  struct rte_mbuf *m_last;
1548 
1550 
1551  m_last = rte_pktmbuf_lastseg(m);
1552  if (unlikely(len > m_last->data_len))
1553  return -1;
1554 
1555  m_last->data_len = (uint16_t)(m_last->data_len - len);
1556  m->pkt_len = (m->pkt_len - len);
1557  return 0;
1558 }
1559 
1569 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
1570 {
1572  return !!(m->nb_segs == 1);
1573 }
1574 
1578 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
1579  uint32_t len, void *buf);
1580 
1601 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
1602  uint32_t off, uint32_t len, void *buf)
1603 {
1604  if (likely(off + len <= rte_pktmbuf_data_len(m)))
1605  return rte_pktmbuf_mtod_offset(m, char *, off);
1606  else
1607  return __rte_pktmbuf_read(m, off, len, buf);
1608 }
1609 
1626 static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
1627 {
1628  struct rte_mbuf *cur_tail;
1629 
1630  /* Check for number-of-segments-overflow */
1631  if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8))
1632  return -EOVERFLOW;
1633 
1634  /* Chain 'tail' onto the old tail */
1635  cur_tail = rte_pktmbuf_lastseg(head);
1636  cur_tail->next = tail;
1637 
1638  /* accumulate number of segments and total length.
1639  * NB: elaborating the addition like this instead of using
1640  * -= allows us to ensure the result type is uint16_t
1641  * avoiding compiler warnings on gcc 8.1 at least */
1642  head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
1643  head->pkt_len += tail->pkt_len;
1644 
1645  /* pkt_len is only set in the head */
1646  tail->pkt_len = tail->data_len;
1647 
1648  return 0;
1649 }
1650 
1665 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
1666 
1667 #ifdef __cplusplus
1668 }
1669 #endif
1670 
1671 #endif /* _RTE_MBUF_H_ */
static void rte_pktmbuf_reset(struct rte_mbuf *m)
Definition: rte_mbuf.h:1004
struct rte_mbuf * next
Definition: rte_mbuf.h:456
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:607
uint16_t vlan_tci_outer
Definition: rte_mbuf.h:444
static int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1495
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
Definition: rte_atomic.h:177
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1035
static void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1214
__extension__ typedef void * MARKER[0]
Definition: rte_mbuf.h:349
#define RTE_MBUF_DIRECT(mb)
Definition: rte_mbuf.h:598
#define IND_ATTACHED_MBUF
Definition: rte_mbuf.h:278
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:974
uint8_t port
Definition: rte_mbuf.h:385
#define likely(x)
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1231
static phys_addr_t rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
Definition: rte_mempool.h:1621
uint64_t l2_len
Definition: rte_mbuf.h:464
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
struct rte_mbuf::@74::@82 sched
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
Definition: rte_mbuf.h:1262
void * buf_addr
Definition: rte_mbuf.h:362
uint32_t l2_type
Definition: rte_mbuf.h:403
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:569
uint16_t data_len
Definition: rte_mbuf.h:414
uint32_t lo
Definition: rte_mbuf.h:427
void * userdata
Definition: rte_mbuf.h:451
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:1626
uint8_t nb_segs
Definition: rte_mbuf.h:384
uint64_t tso_segsz
Definition: rte_mbuf.h:470
__extension__ typedef uint8_t MARKER8[0]
Definition: rte_mbuf.h:351
uint64_t l4_len
Definition: rte_mbuf.h:469
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1327
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:1057
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:990
uint32_t cache_size
Definition: rte_mempool.h:230
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:516
static int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
Definition: rte_mempool.h:1417
uint64_t outer_l3_len
Definition: rte_mbuf.h:473
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1569
struct rte_mempool * mp
uint64_t l3_len
Definition: rte_mbuf.h:468
uint32_t l4_type
Definition: rte_mbuf.h:405
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1341
#define unlikely(x)
uint16_t priv_size
Definition: rte_mbuf.h:482
uint16_t timesync
Definition: rte_mbuf.h:485
uint32_t hi
Definition: rte_mbuf.h:430
__extension__ typedef uint64_t MARKER64[0]
Definition: rte_mbuf.h:353
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define RTE_MIN(a, b)
Definition: rte_common.h:269
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:619
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:695
uint64_t outer_l2_len
Definition: rte_mbuf.h:474
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
Definition: rte_atomic.h:191
#define CTRL_MBUF_FLAG
Definition: rte_mbuf.h:281
uint16_t refcnt
Definition: rte_mbuf.h:382
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1515
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1116
uint32_t tun_type
Definition: rte_mbuf.h:406
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
Definition: rte_atomic.h:272
static int rte_is_ctrlmbuf(struct rte_mbuf *m)
Definition: rte_mbuf.h:853
uint64_t ol_flags
Definition: rte_mbuf.h:387
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1167
uint32_t pkt_len
Definition: rte_mbuf.h:413
uint16_t buf_len
Definition: rte_mbuf.h:365
uint32_t inner_l4_type
Definition: rte_mbuf.h:409
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1434
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:685
uint32_t packet_type
Definition: rte_mbuf.h:401
uint32_t seqn
Definition: rte_mbuf.h:441
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:953
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static phys_addr_t rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:537
phys_addr_t buf_physaddr
Definition: rte_mbuf.h:363
#define RTE_STD_C11
Definition: rte_common.h:64
struct rte_mbuf::@74::@81 fdir
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
Definition: rte_mbuf.h:455
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1484
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:704
uint32_t inner_l3_type
Definition: rte_mbuf.h:408
uint32_t rss
Definition: rte_mbuf.h:419
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1545
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:583
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:1601
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1451
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:747
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1310
uint64_t phys_addr_t
Definition: rte_memory.h:103
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:115
#define __rte_cache_aligned
Definition: rte_memory.h:96
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1356
uint64_t udata64
Definition: rte_mbuf.h:452
uint32_t l3_type
Definition: rte_mbuf.h:404
uint32_t inner_l2_type
Definition: rte_mbuf.h:407
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:499
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:381
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1649
uint64_t tx_offload
Definition: rte_mbuf.h:461
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:219
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
static void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1223
uint16_t vlan_tci
Definition: rte_mbuf.h:416
#define RTE_MBUF_INDIRECT(mb)
Definition: rte_mbuf.h:593
#define RTE_SET_USED(x)
Definition: rte_common.h:103
#define rte_pktmbuf_mtod_offset(m, t, o)
Definition: rte_mbuf.h:1378
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static phys_addr_t rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:555
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
uint32_t usr
Definition: rte_mbuf.h:438