DPDK 20.11.0-rc1
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf_core.h>

#ifdef __cplusplus
extern "C" {
#endif

const char *rte_get_rx_ol_flag_name(uint64_t mask);

int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

const char *rte_get_tx_ol_flag_name(uint64_t mask);

int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(&m->cacheline0);
}

static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}

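/*
 * Usage sketch (illustrative, not part of the original header): prefetch
 * both mbuf cache lines ahead of touching header fields in an RX loop.
 * rte_eth_rx_burst() comes from rte_ethdev.h; process_packet() is a
 * hypothetical application handler.
 *
 *	uint16_t i, nb_rx;
 *	struct rte_mbuf *pkts[32];
 *
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	for (i = 0; i < nb_rx; i++) {
 *		rte_mbuf_prefetch_part1(pkts[i]);
 *		rte_mbuf_prefetch_part2(pkts[i]);
 *	}
 *	for (i = 0; i < nb_rx; i++)
 *		process_packet(pkts[i]);
 */
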
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}

static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

__rte_experimental
static inline char *
rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
{
	return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
}

__rte_experimental
static inline char *
rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
{
	/* gcc complains about calling this experimental function even
	 * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
	 */
#ifdef ALLOW_EXPERIMENTAL_API
	return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
#else
	return NULL;
#endif
}

static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
#ifdef ALLOW_EXPERIMENTAL_API
	return rte_mbuf_buf_addr(md, md->pool);
#else
	char *buffer_addr;
	buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
	return buffer_addr;
#endif
}

__rte_experimental
static inline void *
rte_mbuf_to_priv(struct rte_mbuf *m)
{
	return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
}

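/*
 * Usage sketch (illustrative, not part of the original header): the private
 * area returned by rte_mbuf_to_priv() is only usable if the pool was created
 * with a sufficient priv_size. struct app_meta is a hypothetical per-packet
 * annotation; rte_rdtsc() comes from rte_cycles.h.
 *
 *	struct app_meta { uint32_t flow_id; uint64_t rx_tsc; };
 *
 *	if (rte_pktmbuf_priv_size(m->pool) >= sizeof(struct app_meta)) {
 *		struct app_meta *meta = rte_mbuf_to_priv(m);
 *		meta->flow_id = 42;
 *		meta->rx_tsc = rte_rdtsc();
 *	}
 */
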
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
	uint32_t flags; /**< reserved for future use. */
};

static inline uint32_t
rte_pktmbuf_priv_flags(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->flags;
}

#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)

#define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
	(rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)

#ifdef RTE_LIBRTE_MBUF_DEBUG

#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return __atomic_load_n(&m->refcnt, __ATOMIC_RELAXED);
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	__atomic_store_n(&m->refcnt, new_value, __ATOMIC_RELAXED);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __atomic_add_fetch(&m->refcnt, (uint16_t)value,
				  __ATOMIC_ACQ_REL);
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		++value;
		rte_mbuf_refcnt_set(m, (uint16_t)value);
		return (uint16_t)value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
	return __atomic_load_n(&shinfo->refcnt, __ATOMIC_RELAXED);
}

static inline void
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
	uint16_t new_value)
{
	__atomic_store_n(&shinfo->refcnt, new_value, __ATOMIC_RELAXED);
}

static inline uint16_t
rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
	int16_t value)
{
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
		++value;
		rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
		return (uint16_t)value;
	}

	return __atomic_add_fetch(&shinfo->refcnt, (uint16_t)value,
				  __ATOMIC_ACQ_REL);
}

#define RTE_MBUF_PREFETCH_TO_FREE(m) do {	\
	if ((m) != NULL)			\
		rte_prefetch0(m);		\
} while (0)

void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

__rte_experimental
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
		   const char **reason);

#define MBUF_RAW_ALLOC_CHECK(m) do {				\
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);		\
	RTE_ASSERT((m)->next == NULL);				\
	RTE_ASSERT((m)->nb_segs == 1);				\
	__rte_mbuf_sanity_check(m, 0);				\
} while (0)

static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if (rte_mempool_get(mp, (void **)&m) < 0)
		return NULL;
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

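/*
 * Usage sketch (illustrative, not part of the original header):
 * rte_mbuf_raw_alloc() skips the field reset done by rte_pktmbuf_alloc(),
 * so it is only safe when the caller re-initializes the mbuf itself, as
 * PMD RX paths typically do.
 *
 *	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);
 *	if (m != NULL) {
 *		rte_pktmbuf_reset(m);
 *		rte_mbuf_raw_free(m);
 *	}
 */
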
static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(!RTE_MBUF_CLONED(m) &&
		  (!RTE_MBUF_HAS_EXTBUF(m) || RTE_MBUF_HAS_PINNED_EXTBUF(m)));
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	__rte_mbuf_sanity_check(m, 0);
	rte_mempool_put(m->pool, m);
}

void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		      void *m, unsigned i);

void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

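/*
 * Usage sketch (illustrative, not part of the original header): create a
 * pool of 8191 mbufs with the default data room and no per-mbuf private
 * area, then allocate one packet from it. The pool name and sizes are
 * arbitrary; rte_socket_id() comes from rte_lcore.h.
 *
 *	struct rte_mempool *mp;
 *	struct rte_mbuf *m;
 *
 *	mp = rte_pktmbuf_pool_create("pkt_pool", 8191, 256, 0,
 *		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (mp == NULL)
 *		rte_panic("cannot create mbuf pool: %s\n",
 *			rte_strerror(rte_errno));
 *	m = rte_pktmbuf_alloc(mp);
 */
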
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

struct rte_pktmbuf_extmem {
	void *buf_ptr;		/**< The virtual address of data buffer. */
	rte_iova_t buf_iova;	/**< The IO address of the data buffer. */
	size_t buf_len;		/**< External buffer length in bytes. */
	uint16_t elt_size;	/**< mbuf element size in bytes. */
};

__rte_experimental
struct rte_mempool *
rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size,
	uint16_t data_room_size, int socket_id,
	const struct rte_pktmbuf_extmem *ext_mem,
	unsigned int ext_num);

static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
					(uint16_t)m->buf_len);
}

#define MBUF_INVALID_PORT UINT16_MAX

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;

	m->ol_flags &= EXT_ATTACHED_MBUF;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}

static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here a while() loop is used rather than do {} while () to avoid an
	 * extra check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}

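/*
 * Usage sketch (illustrative, not part of the original header): allocate a
 * burst of mbufs in a single mempool operation and release them with the
 * bulk free API declared further below.
 *
 *	struct rte_mbuf *burst[32];
 *
 *	if (rte_pktmbuf_alloc_bulk(mp, burst, 32) == 0)
 *		rte_pktmbuf_free_bulk(burst, 32);
 */
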
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
	rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
	void *addr;

	addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
				   sizeof(uintptr_t));
	if (addr <= buf_addr)
		return NULL;

	shinfo = (struct rte_mbuf_ext_shared_info *)addr;
	shinfo->free_cb = free_cb;
	shinfo->fcb_opaque = fcb_opaque;
	rte_mbuf_ext_refcnt_set(shinfo, 1);

	*buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
	return shinfo;
}

static inline void
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
	rte_iova_t buf_iova, uint16_t buf_len,
	struct rte_mbuf_ext_shared_info *shinfo)
{
	/* mbuf should not be read-only */
	RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(shinfo->free_cb != NULL);

	m->buf_addr = buf_addr;
	m->buf_iova = buf_iova;
	m->buf_len = buf_len;

	m->data_len = 0;
	m->data_off = 0;

	m->ol_flags |= EXT_ATTACHED_MBUF;
	m->shinfo = shinfo;
}

#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)

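/*
 * Usage sketch (illustrative, not part of the original header): carve the
 * shared-info record out of the tail of an application buffer with
 * rte_pktmbuf_ext_shinfo_init_helper(), then attach the remainder as the
 * mbuf's external data buffer. ext_buf_free_cb is a hypothetical callback
 * that returns the memory once the last reference drops; rte_malloc() and
 * rte_malloc_virt2iova() come from rte_malloc.h.
 *
 *	static void
 *	ext_buf_free_cb(void *addr, void *opaque)
 *	{
 *		rte_free(addr);
 *	}
 *
 *	uint16_t buf_len = 2048;
 *	void *ext_buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
 *	struct rte_mbuf_ext_shared_info *shinfo;
 *
 *	shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf, &buf_len,
 *		ext_buf_free_cb, NULL);
 *	if (shinfo != NULL)
 *		rte_pktmbuf_attach_extbuf(m, ext_buf,
 *			rte_malloc_virt2iova(ext_buf), buf_len, shinfo);
 */
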
static inline void
rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
	memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
}

/* internal */
static inline void
__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
	mdst->port = msrc->port;
	mdst->vlan_tci = msrc->vlan_tci;
	mdst->vlan_tci_outer = msrc->vlan_tci_outer;
	mdst->tx_offload = msrc->tx_offload;
	mdst->hash = msrc->hash;
	mdst->packet_type = msrc->packet_type;
	mdst->timestamp = msrc->timestamp;
	rte_mbuf_dynfield_copy(mdst, msrc);
}

static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		rte_mbuf_ext_refcnt_update(m->shinfo, 1);
		mi->ol_flags = m->ol_flags;
		mi->shinfo = m->shinfo;
	} else {
		/* if m is not direct, get the mbuf that embeds the data */
		rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
		mi->priv_size = m->priv_size;
		mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	}

	__rte_pktmbuf_copy_hdr(mi, m);

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->buf_iova = m->buf_iova;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 0);
}

static inline void
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
	RTE_ASSERT(m->shinfo != NULL);

	if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
		m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
}

static inline void
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_CLONED(m));

	md = rte_mbuf_from_indirect(m);

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

1207 
1226 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1227 {
1228  struct rte_mempool *mp = m->pool;
1229  uint32_t mbuf_size, buf_len;
1230  uint16_t priv_size;
1231 
1232  if (RTE_MBUF_HAS_EXTBUF(m)) {
1233  /*
1234  * The mbuf has the external attached buffer,
1235  * we should check the type of the memory pool where
1236  * the mbuf was allocated from to detect the pinned
1237  * external buffer.
1238  */
1239  uint32_t flags = rte_pktmbuf_priv_flags(mp);
1240 
1241  if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
1242  /*
1243  * The pinned external buffer should not be
1244  * detached from its backing mbuf, just exit.
1245  */
1246  return;
1247  }
1248  __rte_pktmbuf_free_extbuf(m);
1249  } else {
1250  __rte_pktmbuf_free_direct(m);
1251  }
1252  priv_size = rte_pktmbuf_priv_size(mp);
1253  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1254  buf_len = rte_pktmbuf_data_room_size(mp);
1255 
1256  m->priv_size = priv_size;
1257  m->buf_addr = (char *)m + mbuf_size;
1258  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1259  m->buf_len = (uint16_t)buf_len;
1261  m->data_len = 0;
1262  m->ol_flags = 0;
1263 }
1264 
static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	/* Clear flags, mbuf is being freed. */
	m->ol_flags = EXT_ATTACHED_MBUF;
	shinfo = m->shinfo;

	/* Optimize for performance - do not dec/reinit */
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1))
		return 0;

	/*
	 * Direct usage of add primitive to avoid
	 * duplication of comparing with one.
	 */
	if (likely(__atomic_add_fetch(&shinfo->refcnt, (uint16_t)-1,
				      __ATOMIC_ACQ_REL)))
		return 1;

	/* Reinitialize counter before mbuf freeing. */
	rte_mbuf_ext_refcnt_set(shinfo, 1);
	return 0;
}

static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_read(m) == 1)) {

		if (!RTE_MBUF_DIRECT(m)) {
			rte_pktmbuf_detach(m);
			if (RTE_MBUF_HAS_EXTBUF(m) &&
			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
			    __rte_pktmbuf_pinned_extbuf_decref(m))
				return NULL;
		}

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}

		return m;

	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

		if (!RTE_MBUF_DIRECT(m)) {
			rte_pktmbuf_detach(m);
			if (RTE_MBUF_HAS_EXTBUF(m) &&
			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
			    __rte_pktmbuf_pinned_extbuf_decref(m))
				return NULL;
		}

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}
		rte_mbuf_refcnt_set(m, 1);

		return m;
	}
	return NULL;
}

static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL))
		rte_mbuf_raw_free(m);
}

static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)
		__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

__rte_experimental
void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);

struct rte_mbuf *
rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);

__rte_experimental
struct rte_mbuf *
rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
		 uint32_t offset, uint32_t length);

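/*
 * Usage sketch (illustrative, not part of the original header): a clone
 * shares the original's data buffer via reference counting, while
 * rte_pktmbuf_copy() duplicates the bytes into freshly allocated mbufs.
 * Both leave the original "m" intact; clone_pool and copy_pool are
 * assumed to exist.
 *
 *	struct rte_mbuf *clone = rte_pktmbuf_clone(m, clone_pool);
 *	struct rte_mbuf *dup = rte_pktmbuf_copy(m, copy_pool, 0,
 *		rte_pktmbuf_pkt_len(m));
 *
 *	rte_pktmbuf_free(clone);
 *	rte_pktmbuf_free(dup);
 */
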
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	while (m->next != NULL)
		m = m->next;
	return m;
}

#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

#define rte_pktmbuf_data_len(m) ((m)->data_len)

static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	/* NB: elaborating the subtraction like this instead of using
	 * -= allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_off = (uint16_t)(m->data_off - len);
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len = (m->pkt_len + len);
	return (char *)tail;
}

static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	/* NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off = (uint16_t)(m->data_off + len);
	m->pkt_len = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len = (m->pkt_len - len);
	return 0;
}

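/*
 * Usage sketch (illustrative, not part of the original header): grow and
 * shrink the data area at both ends. The four calls below leave the mbuf
 * with its original pkt_len; struct rte_ether_hdr (14 bytes) comes from
 * rte_ether.h.
 *
 *	char *hdr = rte_pktmbuf_prepend(m, sizeof(struct rte_ether_hdr));
 *	char *pad = rte_pktmbuf_append(m, 4);
 *
 *	if (hdr != NULL)
 *		rte_pktmbuf_adj(m, sizeof(struct rte_ether_hdr));
 *	if (pad != NULL)
 *		rte_pktmbuf_trim(m, 4);
 */
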
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return m->nb_segs == 1;
}

const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
			       uint32_t len, void *buf);

static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}

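/*
 * Usage sketch (illustrative, not part of the original header): read 8
 * bytes at offset 64 regardless of segmentation. When the range lies in
 * the first segment, "p" points into the mbuf; otherwise the bytes are
 * copied into the stack buffer and "p" points there. NULL means the range
 * exceeds pkt_len; handle_short_packet() is a hypothetical error path.
 *
 *	uint8_t scratch[8];
 *	const uint8_t *p;
 *
 *	p = rte_pktmbuf_read(m, 64, sizeof(scratch), scratch);
 *	if (p == NULL)
 *		handle_short_packet(m);
 */
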
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}

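/*
 * Usage sketch (illustrative, not part of the original header): append one
 * packet's segment list to another. On success "tail" must no longer be
 * freed independently, since it is released along with "head"; on
 * -EOVERFLOW the two packets remain separate.
 *
 *	if (rte_pktmbuf_chain(head, tail) == 0) {
 *		rte_pktmbuf_free(head);
 *	} else {
 *		rte_pktmbuf_free(head);
 *		rte_pktmbuf_free(tail);
 *	}
 */
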
/**
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice.
 *
 * For given input values generate raw tx_offload value.
 * Note that it is the caller's responsibility to make sure that input
 * parameters don't exceed maximum bit-field values.
 *
 * @param il2
 *   l2_len value.
 * @param il3
 *   l3_len value.
 * @param il4
 *   l4_len value.
 * @param tso
 *   tso_segsz value.
 * @param ol3
 *   outer_l3_len value.
 * @param ol2
 *   outer_l2_len value.
 * @param unused
 *   unused value.
 * @return
 *   raw tx_offload value.
 */
static __rte_always_inline uint64_t
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2, uint64_t unused)
{
	return il2 << RTE_MBUF_L2_LEN_OFS |
	       il3 << RTE_MBUF_L3_LEN_OFS |
	       il4 << RTE_MBUF_L4_LEN_OFS |
	       tso << RTE_MBUF_TSO_SEGSZ_OFS |
	       ol3 << RTE_MBUF_OUTL3_LEN_OFS |
	       ol2 << RTE_MBUF_OUTL2_LEN_OFS |
	       unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
}

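/*
 * Usage sketch (illustrative, not part of the original header): fill the
 * combined tx_offload bit-fields for a TSO TCP/IPv4 packet in one store,
 * then cross-check the flags with rte_validate_tx_offload() (defined
 * below). The header lengths (14/20/20) and MSS (1448) are examples only.
 *
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
 *	m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 1448, 0, 0, 0);
 *
 *	RTE_ASSERT(rte_validate_tx_offload(m) == 0);
 */
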
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;

	/* Does packet set any of available offloads? */
	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
		return 0;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & PKT_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
		    ((ol_flags & PKT_TX_IPV4) &&
		     !(ol_flags & PKT_TX_IP_CKSUM)))
			return -EINVAL;

	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
	    !(ol_flags & PKT_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}

int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);

static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;
	return __rte_pktmbuf_linearize(mbuf);
}

void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
	return m->hash.sched.queue_id;
}

static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
	return m->hash.sched.traffic_class;
}

static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
	return m->hash.sched.color;
}

static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
		   uint8_t *traffic_class,
		   uint8_t *color)
{
	struct rte_mbuf_sched sched = m->hash.sched;

	*queue_id = sched.queue_id;
	*traffic_class = sched.traffic_class;
	*color = sched.color;
}

static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
	m->hash.sched.queue_id = queue_id;
}

static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
	m->hash.sched.traffic_class = traffic_class;
}

static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
	m->hash.sched.color = color;
}

static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
		   uint8_t traffic_class,
		   uint8_t color)
{
	m->hash.sched = (struct rte_mbuf_sched){
		.queue_id = queue_id,
		.traffic_class = traffic_class,
		.color = color,
		.reserved = 0,
	};
}

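/*
 * Usage sketch (illustrative, not part of the original header): tag a
 * packet for the traffic manager and read the tag back. Writing all three
 * values through rte_mbuf_sched_set() is a single 64-bit store.
 *
 *	uint32_t queue_id;
 *	uint8_t traffic_class, color;
 *
 *	rte_mbuf_sched_set(m, 3, 1, 0);
 *	rte_mbuf_sched_get(m, &queue_id, &traffic_class, &color);
 */
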
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */