DPDK  17.11.10
rte_mbuf.h
Go to the documentation of this file.
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  * Copyright 2014 6WIND S.A.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * * Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  * * Redistributions in binary form must reproduce the above copyright
15  * notice, this list of conditions and the following disclaimer in
16  * the documentation and/or other materials provided with the
17  * distribution.
18  * * Neither the name of Intel Corporation nor the names of its
19  * contributors may be used to endorse or promote products derived
20  * from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #ifndef _RTE_MBUF_H_
36 #define _RTE_MBUF_H_
37 
63 #include <stdint.h>
64 #include <rte_common.h>
65 #include <rte_config.h>
66 #include <rte_mempool.h>
67 #include <rte_memory.h>
68 #include <rte_atomic.h>
69 #include <rte_prefetch.h>
70 #include <rte_branch_prediction.h>
71 #include <rte_mbuf_ptype.h>
72 
73 #ifdef __cplusplus
74 extern "C" {
75 #endif
76 
77 /*
78  * Packet Offload Features Flags. It also carry packet type information.
79  * Critical resources. Both rx/tx shared these bits. Be cautious on any change
80  *
81  * - RX flags start at bit position zero, and get added to the left of previous
82  * flags.
83  * - The most-significant 3 bits are reserved for generic mbuf flags
84  * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
85  * added to the right of the previously defined flags i.e. they should count
86  * downwards, not upwards.
87  *
88  * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
89  * rte_get_tx_ol_flag_name().
90  */
91 
/*
 * NOTE(review): section reconstructed from a Doxygen-generated copy;
 * the embedded source line numbers were stripped.  Bit values are
 * exactly as in the original text.
 */

/** RX: the packet is a 802.1q VLAN packet; the tci is saved in vlan_tci. */
#define PKT_RX_VLAN (1ULL << 0)

#define PKT_RX_RSS_HASH (1ULL << 1)      /**< RX: RSS hash result is valid. */
#define PKT_RX_FDIR (1ULL << 2)          /**< RX: flow director match. */
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

/** Mask of bits 4 and 7, used to report IP checksum status as a 2-bit field. */
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
/* Identical re-definition of the flag above; legal since the
 * replacement list is the same (C11 6.10.3). */
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))

/** Mask of bits 3 and 8, used to report L4 checksum status as a 2-bit field. */
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_IEEE1588_PTP (1ULL << 9)
#define PKT_RX_IEEE1588_TMST (1ULL << 10)
#define PKT_RX_FDIR_ID (1ULL << 13)
#define PKT_RX_FDIR_FLX (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

#define PKT_RX_LRO (1ULL << 16)

#define PKT_RX_TIMESTAMP (1ULL << 17)

#define PKT_RX_SEC_OFFLOAD (1ULL << 18)

#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

#define PKT_RX_QINQ (1ULL << 20)

/* add new RX flags here */

/* add new TX flags here (TX flags grow downwards from bit 60) */

#define PKT_TX_SEC_OFFLOAD (1ULL << 43)

#define PKT_TX_MACSEC (1ULL << 44)

/* TX tunnel type: a 4-bit enumeration stored at bits 45-48, not
 * independent bit flags. */
#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)

#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)

#define PKT_TX_QINQ_PKT (1ULL << 49)
#define PKT_TX_TCP_SEG (1ULL << 50)

#define PKT_TX_IEEE1588_TMST (1ULL << 51)

/* L4 checksum request: a 2-bit enumeration stored at bits 52-53. */
#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
#define PKT_TX_TCP_CKSUM (1ULL << 52)
#define PKT_TX_SCTP_CKSUM (2ULL << 52)
#define PKT_TX_UDP_CKSUM (3ULL << 52)
#define PKT_TX_L4_MASK (3ULL << 52)

#define PKT_TX_IP_CKSUM (1ULL << 54)

#define PKT_TX_IPV4 (1ULL << 55)

#define PKT_TX_IPV6 (1ULL << 56)

#define PKT_TX_VLAN_PKT (1ULL << 57)

#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)

#define PKT_TX_OUTER_IPV4 (1ULL << 59)

#define PKT_TX_OUTER_IPV6 (1ULL << 60)

/** Union of all TX offload request flags. */
#define PKT_TX_OFFLOAD_MASK ( \
	PKT_TX_OUTER_IPV6 | \
	PKT_TX_OUTER_IPV4 | \
	PKT_TX_OUTER_IP_CKSUM | \
	PKT_TX_VLAN_PKT | \
	PKT_TX_IPV6 | \
	PKT_TX_IPV4 | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_L4_MASK | \
	PKT_TX_IEEE1588_TMST | \
	PKT_TX_TCP_SEG | \
	PKT_TX_QINQ_PKT | \
	PKT_TX_TUNNEL_MASK | \
	PKT_TX_MACSEC | \
	PKT_TX_SEC_OFFLOAD)

#define __RESERVED (1ULL << 61)

#define IND_ATTACHED_MBUF (1ULL << 62)

/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG (1ULL << 63)

/** Alignment constraint of the mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8
/**
 * Get the name of a RX offload flag.
 * @param mask flag mask (expected to have a single bit set)
 */
const char *rte_get_rx_ol_flag_name(uint64_t mask);

/** Write the list of RX offload flag names for mask into buf. */
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/** Get the name of a TX offload flag. */
const char *rte_get_tx_ol_flag_name(uint64_t mask);

/** Write the list of TX offload flag names for mask into buf. */
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/** Default data room size of an mbuf (headroom excluded). */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* define a set of marker types that can be used to refer to set points in the
 * mbuf.  Zero-size arrays are a GNU extension, hence __extension__. */
__extension__
typedef void *MARKER[0];
__extension__
typedef uint8_t MARKER8[0];
__extension__
typedef uint64_t MARKER64[0];
423 struct rte_mbuf {
/* NOTE(review): the Doxygen extraction dropped many lines of this
 * struct (gaps in the embedded numbering, e.g. 427-433, 454, 458-459,
 * 482-488, 492-494, 503, 509, 516, 519-520, 528-529, 533-536, 542,
 * 550, 552, 558-560, 564, 568, 573-574, 577, 580) INCLUDING the
 * closing "} __rte_cache_aligned;" (line 583 — see the trailing
 * cross-reference index).  Do not treat this copy as layout/ABI
 * authoritative; verify against upstream DPDK 17.11 rte_mbuf.h. */
424  MARKER cacheline0;
425 
/* Virtual address of segment buffer. */
426  void *buf_addr;
434  union {
435  rte_iova_t buf_iova;
/* NOTE(review): index lists a buf_physaddr member (line 436) dropped here. */
437  } __rte_aligned(sizeof(rte_iova_t));
438 
439  /* next 8 bytes are initialised on RX descriptor rearm */
440  MARKER64 rearm_data;
441  uint16_t data_off;
442 
/* NOTE(review): index lists refcnt_atomic (used by the atomic refcnt
 * accessors below); its line was dropped from this union. */
453  union {
455  uint16_t refcnt;
456  };
457  uint16_t nb_segs;
460  uint16_t port;
461 
462  uint64_t ol_flags;
464  /* remaining bytes are set on RX when pulling packet from descriptor */
465  MARKER rx_descriptor_fields1;
466 
467  /*
468  * The packet type, which is the combination of outer/inner L2, L3, L4
469  * and tunnel types. The packet_type is about data really present in the
470  * mbuf. Example: if vlan stripping is enabled, a received vlan packet
471  * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
472  * vlan is stripped from the data.
473  */
475  union {
476  uint32_t packet_type;
477  struct {
478  uint32_t l2_type:4;
479  uint32_t l3_type:4;
480  uint32_t l4_type:4;
481  uint32_t tun_type:4;
/* NOTE(review): index lists inner_esp_next_proto (line 484) dropped here. */
483  union {
489  __extension__
490  struct {
491  uint8_t inner_l2_type:4;
493  uint8_t inner_l3_type:4;
495  };
496  };
497  uint32_t inner_l4_type:4;
498  };
499  };
500 
501  uint32_t pkt_len;
502  uint16_t data_len;
504  uint16_t vlan_tci;
505 
/* RSS / flow-director / scheduler hash results. */
506  union {
507  uint32_t rss;
508  struct {
510  union {
511  struct {
512  uint16_t hash;
513  uint16_t id;
514  };
515  uint32_t lo;
517  };
518  uint32_t hi;
521  } fdir;
522  struct {
523  uint32_t lo;
524  uint32_t hi;
525  } sched;
526  uint32_t usr;
527  } hash;
530  uint16_t vlan_tci_outer;
531 
532  uint16_t buf_len;
537  uint64_t timestamp;
538 
539  /* second cache line - fields only used in slow path or on TX */
540  MARKER cacheline1 __rte_cache_min_aligned;
541 
543  union {
544  void *userdata;
545  uint64_t udata64;
546  };
547 
548  struct rte_mempool *pool;
549  struct rte_mbuf *next;
551  /* fields to support TX offloads */
553  union {
554  uint64_t tx_offload;
555  __extension__
556  struct {
557  uint64_t l2_len:7;
561  uint64_t l3_len:9;
562  uint64_t l4_len:8;
563  uint64_t tso_segsz:16;
565  /* fields for TX offloading of tunnels */
566  uint64_t outer_l3_len:9;
567  uint64_t outer_l2_len:7;
569  /* uint64_t unused:8; */
570  };
571  };
572 
575  uint16_t priv_size;
576 
578  uint16_t timesync;
579 
581  uint32_t seqn;
582 
/* NOTE(review): closing "} __rte_cache_aligned;" (line 583) missing here. */
584 
586 #define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
587 
598 static inline void
600 {
601  rte_prefetch0(&m->cacheline0);
602 }
603 
615 static inline void
617 {
618 #if RTE_CACHE_LINE_SIZE == 64
619  rte_prefetch0(&m->cacheline1);
620 #else
621  RTE_SET_USED(m);
622 #endif
623 }
624 
625 
626 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
627 
636 static inline rte_iova_t
637 rte_mbuf_data_iova(const struct rte_mbuf *mb)
638 {
639  return mb->buf_iova + mb->data_off;
640 }
641 
642 __rte_deprecated
643 static inline phys_addr_t
644 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
645 {
646  return rte_mbuf_data_iova(mb);
647 }
648 
661 static inline rte_iova_t
663 {
664  return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
665 }
666 
667 __rte_deprecated
668 static inline phys_addr_t
669 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
670 {
671  return rte_mbuf_data_iova_default(mb);
672 }
673 
682 static inline struct rte_mbuf *
684 {
685  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
686 }
687 
696 static inline char *
698 {
699  char *buffer_addr;
700  buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
701  return buffer_addr;
702 }
703 
/** Returns TRUE if the given mbuf is indirect, else FALSE. */
#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)

/** Returns TRUE if the given mbuf is direct, else FALSE. */
#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))

/**
 * Private data carried in the mempool of a pktmbuf pool.
 * NOTE(review): the opening of this struct was dropped by the doc
 * extraction; reconstructed from the cross-reference index
 * (mbuf_data_room_size, line 721) and the accessors below.
 */
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

/** check mbuf type in debug mode (no-op in release builds) */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */
736 
737 #ifdef RTE_MBUF_REFCNT_ATOMIC
738 
746 static inline uint16_t
747 rte_mbuf_refcnt_read(const struct rte_mbuf *m)
748 {
749  return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
750 }
751 
759 static inline void
760 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
761 {
762  rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
763 }
764 
765 /* internal */
766 static inline uint16_t
767 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
768 {
769  return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
770 }
771 
781 static inline uint16_t
782 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
783 {
784  /*
785  * The atomic_add is an expensive operation, so we don't want to
786  * call it in the case where we know we are the unique holder of
787  * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
788  * operation has to be used because concurrent accesses on the
789  * reference counter can occur.
790  */
791  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
792  ++value;
793  rte_mbuf_refcnt_set(m, (uint16_t)value);
794  return (uint16_t)value;
795  }
796 
797  return __rte_mbuf_refcnt_update(m, value);
798 }
799 
800 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
801 
802 /* internal */
803 static inline uint16_t
804 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
805 {
806  m->refcnt = (uint16_t)(m->refcnt + value);
807  return m->refcnt;
808 }
809 
813 static inline uint16_t
814 rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
815 {
816  return __rte_mbuf_refcnt_update(m, value);
817 }
818 
822 static inline uint16_t
824 {
825  return m->refcnt;
826 }
827 
831 static inline void
832 rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
833 {
834  m->refcnt = new_value;
835 }
836 
837 #endif /* RTE_MBUF_REFCNT_ATOMIC */
838 
840 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
841  if ((m) != NULL) \
842  rte_prefetch0(m); \
843 } while (0)
844 
845 
858 void
859 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
860 
861 #define MBUF_RAW_ALLOC_CHECK(m) do { \
862  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
863  RTE_ASSERT((m)->next == NULL); \
864  RTE_ASSERT((m)->nb_segs == 1); \
865  __rte_mbuf_sanity_check(m, 0); \
866 } while (0)
867 
887 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
888 {
889  struct rte_mbuf *m;
890  void *mb = NULL;
891 
892  if (rte_mempool_get(mp, &mb) < 0)
893  return NULL;
894  m = (struct rte_mbuf *)mb;
895  MBUF_RAW_ALLOC_CHECK(m);
896  return m;
897 }
898 
913 static __rte_always_inline void
915 {
916  RTE_ASSERT(RTE_MBUF_DIRECT(m));
917  RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
918  RTE_ASSERT(m->next == NULL);
919  RTE_ASSERT(m->nb_segs == 1);
921  rte_mempool_put(m->pool, m);
922 }
923 
924 /* compat with older versions */
925 __rte_deprecated
926 static inline void
927 __rte_mbuf_raw_free(struct rte_mbuf *m)
928 {
930 }
931 
932 /* Operations on ctrl mbuf */
933 
953 void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
954  void *m, unsigned i);
955 
968 #define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
969 
976 #define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)
977 
986 #define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
987 
996 #define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)
997 
1007 static inline int
1009 {
1010  return !!(m->ol_flags & CTRL_MBUF_FLAG);
1011 }
1012 
1013 /* Operations on pkt mbuf */
1014 
1034 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
1035  void *m, unsigned i);
1036 
1037 
1055 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
1056 
1091 struct rte_mempool *
1092 rte_pktmbuf_pool_create(const char *name, unsigned n,
1093  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
1094  int socket_id);
1095 
1107 static inline uint16_t
1109 {
1110  struct rte_pktmbuf_pool_private *mbp_priv;
1111 
1112  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1113  return mbp_priv->mbuf_data_room_size;
1114 }
1115 
1128 static inline uint16_t
1130 {
1131  struct rte_pktmbuf_pool_private *mbp_priv;
1132 
1133  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1134  return mbp_priv->mbuf_priv_size;
1135 }
1136 
1145 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
1146 {
1147  m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
1148  (uint16_t)m->buf_len);
1149 }
1150 
1159 #define MBUF_INVALID_PORT UINT16_MAX
1160 
1161 static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
1162 {
1163  m->next = NULL;
1164  m->pkt_len = 0;
1165  m->tx_offload = 0;
1166  m->vlan_tci = 0;
1167  m->vlan_tci_outer = 0;
1168  m->nb_segs = 1;
1169  m->port = MBUF_INVALID_PORT;
1170 
1171  m->ol_flags = 0;
1172  m->packet_type = 0;
1174 
1175  m->data_len = 0;
1177 }
1178 
1192 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1193 {
1194  struct rte_mbuf *m;
1195  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1196  rte_pktmbuf_reset(m);
1197  return m;
1198 }
1199 
/**
 * Allocate a bulk of mbufs and reset their fields to default values.
 * @return 0 on success, or the negative rte_mempool_get_bulk() error
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here while() loop is used rather than do() while{} to avoid extra
	 * check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}
1255 
1273 static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1274 {
1275  struct rte_mbuf *md;
1276 
1277  RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1278  rte_mbuf_refcnt_read(mi) == 1);
1279 
1280  /* if m is not direct, get the mbuf that embeds the data */
1281  if (RTE_MBUF_DIRECT(m))
1282  md = m;
1283  else
1284  md = rte_mbuf_from_indirect(m);
1285 
1286  rte_mbuf_refcnt_update(md, 1);
1287  mi->priv_size = m->priv_size;
1288  mi->buf_iova = m->buf_iova;
1289  mi->buf_addr = m->buf_addr;
1290  mi->buf_len = m->buf_len;
1291 
1292  mi->data_off = m->data_off;
1293  mi->data_len = m->data_len;
1294  mi->port = m->port;
1295  mi->vlan_tci = m->vlan_tci;
1296  mi->vlan_tci_outer = m->vlan_tci_outer;
1297  mi->tx_offload = m->tx_offload;
1298  mi->hash = m->hash;
1299 
1300  mi->next = NULL;
1301  mi->pkt_len = mi->data_len;
1302  mi->nb_segs = 1;
1303  mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
1304  mi->packet_type = m->packet_type;
1305  mi->timestamp = m->timestamp;
1306 
1307  __rte_mbuf_sanity_check(mi, 1);
1309 }
1310 
1324 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1325 {
1326  struct rte_mbuf *md = rte_mbuf_from_indirect(m);
1327  struct rte_mempool *mp = m->pool;
1328  uint32_t mbuf_size, buf_len;
1329  uint16_t priv_size;
1330 
1331  priv_size = rte_pktmbuf_priv_size(mp);
1332  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1333  buf_len = rte_pktmbuf_data_room_size(mp);
1334 
1335  m->priv_size = priv_size;
1336  m->buf_addr = (char *)m + mbuf_size;
1337  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1338  m->buf_len = (uint16_t)buf_len;
1340  m->data_len = 0;
1341  m->ol_flags = 0;
1342 
1343  if (rte_mbuf_refcnt_update(md, -1) == 0) {
1344  md->next = NULL;
1345  md->nb_segs = 1;
1346  rte_mbuf_refcnt_set(md, 1);
1347  rte_mbuf_raw_free(md);
1348  }
1349 }
1350 
1365 static __rte_always_inline struct rte_mbuf *
1367 {
1369 
1370  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1371 
1372  if (RTE_MBUF_INDIRECT(m))
1373  rte_pktmbuf_detach(m);
1374 
1375  if (m->next != NULL) {
1376  m->next = NULL;
1377  m->nb_segs = 1;
1378  }
1379 
1380  return m;
1381 
1382  } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1383 
1384  if (RTE_MBUF_INDIRECT(m))
1385  rte_pktmbuf_detach(m);
1386 
1387  if (m->next != NULL) {
1388  m->next = NULL;
1389  m->nb_segs = 1;
1390  }
1391  rte_mbuf_refcnt_set(m, 1);
1392 
1393  return m;
1394  }
1395  return NULL;
1396 }
1397 
1398 /* deprecated, replaced by rte_pktmbuf_prefree_seg() */
1399 __rte_deprecated
1400 static inline struct rte_mbuf *
1401 __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
1402 {
1403  return rte_pktmbuf_prefree_seg(m);
1404 }
1405 
1415 static __rte_always_inline void
1417 {
1418  m = rte_pktmbuf_prefree_seg(m);
1419  if (likely(m != NULL))
1420  rte_mbuf_raw_free(m);
1421 }
1422 
1432 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1433 {
1434  struct rte_mbuf *m_next;
1435 
1436  if (m != NULL)
1438 
1439  while (m != NULL) {
1440  m_next = m->next;
1442  m = m_next;
1443  }
1444 }
1445 
1463 static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
1464  struct rte_mempool *mp)
1465 {
1466  struct rte_mbuf *mc, *mi, **prev;
1467  uint32_t pktlen;
1468  uint16_t nseg;
1469 
1470  if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
1471  return NULL;
1472 
1473  mi = mc;
1474  prev = &mi->next;
1475  pktlen = md->pkt_len;
1476  nseg = 0;
1477 
1478  do {
1479  nseg++;
1480  rte_pktmbuf_attach(mi, md);
1481  *prev = mi;
1482  prev = &mi->next;
1483  } while ((md = md->next) != NULL &&
1484  (mi = rte_pktmbuf_alloc(mp)) != NULL);
1485 
1486  *prev = NULL;
1487  mc->nb_segs = nseg;
1488  mc->pkt_len = pktlen;
1489 
1490  /* Allocation of new indirect segment failed */
1491  if (unlikely (mi == NULL)) {
1492  rte_pktmbuf_free(mc);
1493  return NULL;
1494  }
1495 
1496  __rte_mbuf_sanity_check(mc, 1);
1497  return mc;
1498 }
1499 
1511 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1512 {
1514 
1515  do {
1516  rte_mbuf_refcnt_update(m, v);
1517  } while ((m = m->next) != NULL);
1518 }
1519 
1528 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
1529 {
1531  return m->data_off;
1532 }
1533 
1542 static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
1543 {
1545  return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
1546  m->data_len);
1547 }
1548 
1557 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
1558 {
1559  struct rte_mbuf *m2 = (struct rte_mbuf *)m;
1560 
1562  while (m2->next != NULL)
1563  m2 = m2->next;
1564  return m2;
1565 }
1566 
/**
 * A macro that points to an offset into the data in the mbuf.
 * @param m the packet mbuf
 * @param t the type to cast the result into
 * @param o the offset in bytes
 */
#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/** A macro that points to the start of the data in the mbuf. */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/** IO address of an offset into the mbuf data. */
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

/** IO address of the start of the mbuf data. */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

/** Total length of the packet (all segments); only set in the head mbuf. */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/** Length of the data in this segment. */
#define rte_pktmbuf_data_len(m) ((m)->data_len)
1645 
1661 static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
1662  uint16_t len)
1663 {
1665 
1666  if (unlikely(len > rte_pktmbuf_headroom(m)))
1667  return NULL;
1668 
1669  /* NB: elaborating the subtraction like this instead of using
1670  * -= allows us to ensure the result type is uint16_t
1671  * avoiding compiler warnings on gcc 8.1 at least */
1672  m->data_off = (uint16_t)(m->data_off - len);
1673  m->data_len = (uint16_t)(m->data_len + len);
1674  m->pkt_len = (m->pkt_len + len);
1675 
1676  return (char *)m->buf_addr + m->data_off;
1677 }
1678 
1694 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
1695 {
1696  void *tail;
1697  struct rte_mbuf *m_last;
1698 
1700 
1701  m_last = rte_pktmbuf_lastseg(m);
1702  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
1703  return NULL;
1704 
1705  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
1706  m_last->data_len = (uint16_t)(m_last->data_len + len);
1707  m->pkt_len = (m->pkt_len + len);
1708  return (char*) tail;
1709 }
1710 
1725 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1726 {
1728 
1729  if (unlikely(len > m->data_len))
1730  return NULL;
1731 
1732  /* NB: elaborating the addition like this instead of using
1733  * += allows us to ensure the result type is uint16_t
1734  * avoiding compiler warnings on gcc 8.1 at least */
1735  m->data_len = (uint16_t)(m->data_len - len);
1736  m->data_off = (uint16_t)(m->data_off + len);
1737  m->pkt_len = (m->pkt_len - len);
1738  return (char *)m->buf_addr + m->data_off;
1739 }
1740 
1755 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
1756 {
1757  struct rte_mbuf *m_last;
1758 
1760 
1761  m_last = rte_pktmbuf_lastseg(m);
1762  if (unlikely(len > m_last->data_len))
1763  return -1;
1764 
1765  m_last->data_len = (uint16_t)(m_last->data_len - len);
1766  m->pkt_len = (m->pkt_len - len);
1767  return 0;
1768 }
1769 
1779 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
1780 {
1782  return !!(m->nb_segs == 1);
1783 }
1784 
1788 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
1789  uint32_t len, void *buf);
1790 
1811 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
1812  uint32_t off, uint32_t len, void *buf)
1813 {
1814  if (likely(off + len <= rte_pktmbuf_data_len(m)))
1815  return rte_pktmbuf_mtod_offset(m, char *, off);
1816  else
1817  return __rte_pktmbuf_read(m, off, len, buf);
1818 }
1819 
1836 static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
1837 {
1838  struct rte_mbuf *cur_tail;
1839 
1840  /* Check for number-of-segments-overflow */
1841  if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
1842  return -EOVERFLOW;
1843 
1844  /* Chain 'tail' onto the old tail */
1845  cur_tail = rte_pktmbuf_lastseg(head);
1846  cur_tail->next = tail;
1847 
1848  /* accumulate number of segments and total length.
1849  * NB: elaborating the addition like this instead of using
1850  * -= allows us to ensure the result type is uint16_t
1851  * avoiding compiler warnings on gcc 8.1 at least */
1852  head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
1853  head->pkt_len += tail->pkt_len;
1854 
1855  /* pkt_len is only set in the head */
1856  tail->pkt_len = tail->data_len;
1857 
1858  return 0;
1859 }
1860 
1871 static inline int
1873 {
1874  uint64_t ol_flags = m->ol_flags;
1875  uint64_t inner_l3_offset = m->l2_len;
1876 
1877  /* Does packet set any of available offloads? */
1878  if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
1879  return 0;
1880 
1881  if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
1882  /* NB: elaborating the addition like this instead of using
1883  * += gives the result uint64_t type instead of int,
1884  * avoiding compiler warnings on gcc 8.1 at least */
1885  inner_l3_offset = inner_l3_offset + m->outer_l2_len +
1886  m->outer_l3_len;
1887 
1888  /* Headers are fragmented */
1889  if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
1890  return -ENOTSUP;
1891 
1892  /* IP checksum can be counted only for IPv4 packet */
1893  if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
1894  return -EINVAL;
1895 
1896  /* IP type not set when required */
1897  if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
1898  if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
1899  return -EINVAL;
1900 
1901  /* Check requirements for TSO packet */
1902  if (ol_flags & PKT_TX_TCP_SEG)
1903  if ((m->tso_segsz == 0) ||
1904  ((ol_flags & PKT_TX_IPV4) &&
1905  !(ol_flags & PKT_TX_IP_CKSUM)))
1906  return -EINVAL;
1907 
1908  /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
1909  if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
1910  !(ol_flags & PKT_TX_OUTER_IPV4))
1911  return -EINVAL;
1912 
1913  return 0;
1914 }
1915 
1928 static inline int
1930 {
1931  size_t seg_len, copy_len;
1932  struct rte_mbuf *m;
1933  struct rte_mbuf *m_next;
1934  char *buffer;
1935 
1936  if (rte_pktmbuf_is_contiguous(mbuf))
1937  return 0;
1938 
1939  /* Extend first segment to the total packet length */
1940  copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
1941 
1942  if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
1943  return -1;
1944 
1945  buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
1946  mbuf->data_len = (uint16_t)(mbuf->pkt_len);
1947 
1948  /* Append data from next segments to the first one */
1949  m = mbuf->next;
1950  while (m != NULL) {
1951  m_next = m->next;
1952 
1953  seg_len = rte_pktmbuf_data_len(m);
1954  rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
1955  buffer += seg_len;
1956 
1958  m = m_next;
1959  }
1960 
1961  mbuf->next = NULL;
1962  mbuf->nb_segs = 1;
1963 
1964  return 0;
1965 }
1966 
1981 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
1982 
1983 #ifdef __cplusplus
1984 }
1985 #endif
1986 
1987 #endif /* _RTE_MBUF_H_ */
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:637
struct rte_mbuf * next
Definition: rte_mbuf.h:549
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:721
uint64_t timestamp
Definition: rte_mbuf.h:537
uint16_t vlan_tci_outer
Definition: rte_mbuf.h:530
#define __rte_always_inline
Definition: rte_common.h:139
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
Definition: rte_atomic.h:204
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1192
uint8_t inner_esp_next_proto
Definition: rte_mbuf.h:484
__extension__ typedef void * MARKER[0]
Definition: rte_mbuf.h:413
#define RTE_MBUF_DIRECT(mb)
Definition: rte_mbuf.h:712
#define IND_ATTACHED_MBUF
Definition: rte_mbuf.h:342
rte_iova_t buf_physaddr
Definition: rte_mbuf.h:436
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1129
static int rte_validate_tx_offload(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1872
#define likely(x)
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1432
uint64_t l2_len
Definition: rte_mbuf.h:557
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
Definition: rte_mbuf.h:1463
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1416
void * buf_addr
Definition: rte_mbuf.h:426
uint32_t l2_type
Definition: rte_mbuf.h:478
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:683
uint16_t data_len
Definition: rte_mbuf.h:502
uint32_t lo
Definition: rte_mbuf.h:515
void * userdata
Definition: rte_mbuf.h:544
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:1836
uint8_t inner_l2_type
Definition: rte_mbuf.h:491
uint64_t tso_segsz
Definition: rte_mbuf.h:563
__extension__ typedef uint8_t MARKER8[0]
Definition: rte_mbuf.h:415
uint64_t l4_len
Definition: rte_mbuf.h:562
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1528
struct rte_mbuf __rte_cache_aligned
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:1214
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:1145
uint32_t cache_size
Definition: rte_mempool.h:241
#define PKT_TX_OUTER_IP_CKSUM
Definition: rte_mbuf.h:304
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:616
#define PKT_TX_IPV6
Definition: rte_mbuf.h:294
uint16_t nb_segs
Definition: rte_mbuf.h:457
uint16_t port
Definition: rte_mbuf.h:460
uint64_t outer_l3_len
Definition: rte_mbuf.h:566
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1366
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1779
uint64_t l3_len
Definition: rte_mbuf.h:561
uint32_t l4_type
Definition: rte_mbuf.h:480
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
Definition: rte_mbuf.h:311
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1542
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:914
#define PKT_TX_TCP_SEG
Definition: rte_mbuf.h:254
#define unlikely(x)
uint16_t priv_size
Definition: rte_mbuf.h:575
uint16_t timesync
Definition: rte_mbuf.h:578
uint32_t hi
Definition: rte_mbuf.h:518
struct rte_mbuf::@117::@129 sched
__extension__ typedef uint64_t MARKER64[0]
Definition: rte_mbuf.h:417
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define RTE_MIN(a, b)
Definition: rte_common.h:310
#define PKT_TX_IPV4
Definition: rte_mbuf.h:286
struct rte_mbuf::@117::@128 fdir
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:733
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:823
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
Definition: rte_mbuf.h:1929
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1392
uint64_t outer_l2_len
Definition: rte_mbuf.h:567
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
Definition: rte_atomic.h:218
#define CTRL_MBUF_FLAG
Definition: rte_mbuf.h:345
uint16_t refcnt
Definition: rte_mbuf.h:455
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1725
#define rte_pktmbuf_pkt_len(m)
Definition: rte_mbuf.h:1634
RTE_STD_C11 union rte_mbuf::@114 __rte_aligned
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1273
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1364
uint32_t tun_type
Definition: rte_mbuf.h:481
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
Definition: rte_atomic.h:299
static int rte_is_ctrlmbuf(struct rte_mbuf *m)
Definition: rte_mbuf.h:1008
uint64_t ol_flags
Definition: rte_mbuf.h:462
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1324
uint32_t pkt_len
Definition: rte_mbuf.h:501
#define PKT_TX_L4_MASK
Definition: rte_mbuf.h:270
uint16_t buf_len
Definition: rte_mbuf.h:532
uint32_t inner_l4_type
Definition: rte_mbuf.h:497
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1644
#define rte_pktmbuf_mtod(m, t)
Definition: rte_mbuf.h:1596
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:814
uint32_t packet_type
Definition: rte_mbuf.h:476
#define MBUF_INVALID_PORT
Definition: rte_mbuf.h:1159
uint32_t seqn
Definition: rte_mbuf.h:581
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:1108
uint8_t inner_l3_type
Definition: rte_mbuf.h:493
const char * rte_get_rx_ol_flag_name(uint64_t mask)
#define RTE_STD_C11
Definition: rte_common.h:66
#define PKT_TX_IP_CKSUM
Definition: rte_mbuf.h:278
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
Definition: rte_mbuf.h:548
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1694
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:832
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:662
uint32_t rss
Definition: rte_mbuf.h:507
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1755
uint64_t rte_iova_t
Definition: rte_memory.h:107
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:697
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:1811
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1661
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:887
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1511
uint64_t phys_addr_t
Definition: rte_memory.h:98
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:156
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1557
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
Definition: rte_mbuf.h:324
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1475
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1233
uint64_t udata64
Definition: rte_mbuf.h:545
uint32_t l3_type
Definition: rte_mbuf.h:479
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:599
rte_atomic16_t refcnt_atomic
Definition: rte_mbuf.h:454
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1510
uint64_t tx_offload
Definition: rte_mbuf.h:554
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:230
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
uint16_t vlan_tci
Definition: rte_mbuf.h:504
#define RTE_MBUF_INDIRECT(mb)
Definition: rte_mbuf.h:707
#define RTE_SET_USED(x)
Definition: rte_common.h:111
#define rte_pktmbuf_mtod_offset(m, t, o)
Definition: rte_mbuf.h:1581
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
uint32_t usr
Definition: rte_mbuf.h:526