DPDK 24.07.0-rc3
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf_core.h>

#ifdef __cplusplus
extern "C" {
#endif

const char *rte_get_rx_ol_flag_name(uint64_t mask);

int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

const char *rte_get_tx_ol_flag_name(uint64_t mask);

int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(m);
}

static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

static inline rte_iova_t
rte_mbuf_iova_get(const struct rte_mbuf *m)
{
#if RTE_IOVA_IN_MBUF
	return m->buf_iova;
#else
	return (rte_iova_t)m->buf_addr;
#endif
}

static inline void
rte_mbuf_iova_set(struct rte_mbuf *m, rte_iova_t iova)
{
#if RTE_IOVA_IN_MBUF
	m->buf_iova = iova;
#else
	RTE_SET_USED(m);
	RTE_SET_USED(iova);
#endif
}

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return rte_mbuf_iova_get(mb) + mb->data_off;
}

static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_iova_get(mb) + RTE_PKTMBUF_HEADROOM;
}

static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

static inline char *
rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
{
	return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
}

static inline char *
rte_mbuf_data_addr_default(struct rte_mbuf *mb)
{
	return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
}

static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
	return rte_mbuf_buf_addr(md, md->pool);
}

static inline void *
rte_mbuf_to_priv(struct rte_mbuf *m)
{
	return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
}

struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size;	/**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;	/**< Size of private area in each mbuf. */
	uint32_t flags;			/**< Reserved for future use. */
};

static inline uint32_t
rte_pktmbuf_priv_flags(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->flags;
}

#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)

#define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
	(rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)

#ifdef RTE_LIBRTE_MBUF_DEBUG

#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return rte_atomic_load_explicit(&m->refcnt, rte_memory_order_relaxed);
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic_store_explicit(&m->refcnt, new_value, rte_memory_order_relaxed);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return rte_atomic_fetch_add_explicit(&m->refcnt, value,
		rte_memory_order_acq_rel) + value;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		++value;
		rte_mbuf_refcnt_set(m, (uint16_t)value);
		return (uint16_t)value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

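/*
 * Usage sketch (illustrative; queue_to_consumer_a/b are hypothetical
 * application helpers, not DPDK API): take an extra reference on an mbuf
 * shared by two consumers. Each consumer calls rte_pktmbuf_free(), and
 * the buffer returns to its pool only when the last reference is dropped.
 *
 *	rte_mbuf_refcnt_update(m, 1);   // refcnt: 1 -> 2
 *	queue_to_consumer_a(m);
 *	queue_to_consumer_b(m);
 */
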
static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
	return rte_atomic_load_explicit(&shinfo->refcnt, rte_memory_order_relaxed);
}

static inline void
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
	uint16_t new_value)
{
	rte_atomic_store_explicit(&shinfo->refcnt, new_value, rte_memory_order_relaxed);
}

static inline uint16_t
rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
	int16_t value)
{
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
		++value;
		rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
		return (uint16_t)value;
	}

	return rte_atomic_fetch_add_explicit(&shinfo->refcnt, value,
		rte_memory_order_acq_rel) + value;
}

#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
	if ((m) != NULL) \
		rte_prefetch0(m); \
} while (0)

void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
		const char **reason);

static __rte_always_inline void
__rte_mbuf_raw_sanity_check(__rte_unused const struct rte_mbuf *m)
{
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	__rte_mbuf_sanity_check(m, 0);
}

#define MBUF_RAW_ALLOC_CHECK(m) __rte_mbuf_raw_sanity_check(m)

static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if (rte_mempool_get(mp, (void **)&m) < 0)
		return NULL;
	__rte_mbuf_raw_sanity_check(m);
	return m;
}

static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(!RTE_MBUF_CLONED(m) &&
		  (!RTE_MBUF_HAS_EXTBUF(m) || RTE_MBUF_HAS_PINNED_EXTBUF(m)));
	__rte_mbuf_raw_sanity_check(m);
	rte_mempool_put(m->pool, m);
}

void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);

void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

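/*
 * Usage sketch (illustrative; the pool name and sizing are arbitrary):
 * create a packet mbuf pool at initialization time, typically once per
 * NUMA socket.
 *
 *	struct rte_mempool *pool;
 *
 *	pool = rte_pktmbuf_pool_create("mbuf_pool", 8191, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (pool == NULL)
 *		rte_panic("cannot create mbuf pool\n");
 */
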
struct rte_pktmbuf_extmem {
	void *buf_ptr;		/**< The virtual address of data buffer. */
	rte_iova_t buf_iova;	/**< The IO address of the data buffer. */
	size_t buf_len;		/**< External buffer length in bytes. */
	uint16_t elt_size;	/**< mbuf element size in bytes. */
};

struct rte_mempool *
rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size,
	uint16_t data_room_size, int socket_id,
	const struct rte_pktmbuf_extmem *ext_mem,
	unsigned int ext_num);

static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
			(uint16_t)m->buf_len);
}

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = RTE_MBUF_PORT_INVALID;

	m->ol_flags &= RTE_MBUF_F_EXTERNAL;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}

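/*
 * Usage sketch (illustrative): the common per-packet lifecycle. "pool" is
 * assumed to have been created with rte_pktmbuf_pool_create() as above.
 *
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(pool);
 *	if (m == NULL)
 *		return -ENOBUFS;
 *	// ... fill the data area, e.g. via rte_pktmbuf_append() ...
 *	rte_pktmbuf_free(m);    // returns every segment to its pool
 */
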
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand Duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * A while() loop is used here rather than do {} while() to avoid an
	 * extra check in case count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}

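/*
 * Usage sketch (illustrative): allocate a burst of mbufs in one mempool
 * operation, which is considerably cheaper than 32 individual allocations.
 *
 *	struct rte_mbuf *burst[32];
 *
 *	if (rte_pktmbuf_alloc_bulk(pool, burst, 32) != 0)
 *		return -ENOBUFS;    // nothing is allocated on failure
 */
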
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
	rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
	void *addr;

	addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
				   sizeof(uintptr_t));
	if (addr <= buf_addr)
		return NULL;

	shinfo = (struct rte_mbuf_ext_shared_info *)addr;
	shinfo->free_cb = free_cb;
	shinfo->fcb_opaque = fcb_opaque;
	rte_mbuf_ext_refcnt_set(shinfo, 1);

	*buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
	return shinfo;
}

static inline void
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
	rte_iova_t buf_iova, uint16_t buf_len,
	struct rte_mbuf_ext_shared_info *shinfo)
{
	/* mbuf should not be read-only */
	RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(shinfo->free_cb != NULL);

	m->buf_addr = buf_addr;
	rte_mbuf_iova_set(m, buf_iova);
	m->buf_len = buf_len;

	m->data_len = 0;
	m->data_off = 0;

	m->ol_flags |= RTE_MBUF_F_EXTERNAL;
	m->shinfo = shinfo;
}

#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)

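/*
 * Usage sketch (illustrative; ext_buf, ext_iova, my_free_cb and my_cb_ctx
 * are application-provided, not DPDK API): carve a shared-info block out
 * of the tail of an external buffer, then attach the buffer to a freshly
 * allocated mbuf. The free callback runs once the last reference to the
 * buffer is dropped.
 *
 *	uint16_t len = ext_buf_size;
 *	struct rte_mbuf_ext_shared_info *shinfo;
 *
 *	shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf, &len,
 *			my_free_cb, my_cb_ctx);
 *	if (shinfo == NULL)
 *		return -ENOSPC;    // buffer too small for shared info
 *	rte_pktmbuf_attach_extbuf(m, ext_buf, ext_iova, len, shinfo);
 */
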
static inline void
rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
#if !RTE_IOVA_IN_MBUF
	mdst->dynfield2 = msrc->dynfield2;
#endif
	memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
}

/* internal */
static inline void
__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
	mdst->port = msrc->port;
	mdst->vlan_tci = msrc->vlan_tci;
	mdst->vlan_tci_outer = msrc->vlan_tci_outer;
	mdst->tx_offload = msrc->tx_offload;
	mdst->hash = msrc->hash;
	mdst->packet_type = msrc->packet_type;
	rte_mbuf_dynfield_copy(mdst, msrc);
}

static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		rte_mbuf_ext_refcnt_update(m->shinfo, 1);
		mi->ol_flags = m->ol_flags;
		mi->shinfo = m->shinfo;
	} else {
		/* if m is not direct, get the mbuf that embeds the data */
		rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
		mi->priv_size = m->priv_size;
		mi->ol_flags = m->ol_flags | RTE_MBUF_F_INDIRECT;
	}

	__rte_pktmbuf_copy_hdr(mi, m);

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	rte_mbuf_iova_set(mi, rte_mbuf_iova_get(m));
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 0);
}

static inline void
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
	RTE_ASSERT(m->shinfo != NULL);

	if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
		m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
}

static inline void
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_CLONED(m));

	md = rte_mbuf_from_indirect(m);

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	uint16_t priv_size;

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		/*
		 * The mbuf has an attached external buffer; check the
		 * type of the memory pool the mbuf was allocated from
		 * to detect a pinned external buffer.
		 */
		uint32_t flags = rte_pktmbuf_priv_flags(mp);

		if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
			/*
			 * The pinned external buffer should not be
			 * detached from its backing mbuf, just exit.
			 */
			return;
		}
		__rte_pktmbuf_free_extbuf(m);
	} else {
		__rte_pktmbuf_free_direct(m);
	}
	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
}

static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	/* Clear flags, mbuf is being freed. */
	m->ol_flags = RTE_MBUF_F_EXTERNAL;
	shinfo = m->shinfo;

	/* Optimize for performance - do not dec/reinit */
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1))
		return 0;

	/*
	 * Direct usage of add primitive to avoid
	 * duplication of comparing with one.
	 */
	if (likely(rte_atomic_fetch_add_explicit(&shinfo->refcnt, -1,
			rte_memory_order_acq_rel) - 1))
		return 1;

	/* Reinitialize counter before mbuf freeing. */
	rte_mbuf_ext_refcnt_set(shinfo, 1);
	return 0;
}

static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_read(m) == 1)) {

		if (!RTE_MBUF_DIRECT(m)) {
			rte_pktmbuf_detach(m);
			if (RTE_MBUF_HAS_EXTBUF(m) &&
			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
			    __rte_pktmbuf_pinned_extbuf_decref(m))
				return NULL;
		}

		if (m->next != NULL)
			m->next = NULL;
		if (m->nb_segs != 1)
			m->nb_segs = 1;

		return m;

	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

		if (!RTE_MBUF_DIRECT(m)) {
			rte_pktmbuf_detach(m);
			if (RTE_MBUF_HAS_EXTBUF(m) &&
			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
			    __rte_pktmbuf_pinned_extbuf_decref(m))
				return NULL;
		}

		if (m->next != NULL)
			m->next = NULL;
		if (m->nb_segs != 1)
			m->nb_segs = 1;
		rte_mbuf_refcnt_set(m, 1);

		return m;
	}
	return NULL;
}

static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL))
		rte_mbuf_raw_free(m);
}

static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)
		__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);

struct rte_mbuf *
rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);

struct rte_mbuf *
rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
		uint32_t offset, uint32_t length);

static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	while (m->next != NULL)
		m = m->next;
	return m;
}

#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

#define rte_pktmbuf_data_len(m) ((m)->data_len)

static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	/* NB: elaborating the subtraction like this instead of using
	 * -= allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_off = (uint16_t)(m->data_off - len);
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len = (m->pkt_len + len);
	return (char *)tail;
}

static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	/* NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off = (uint16_t)(m->data_off + len);
	m->pkt_len = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len = (m->pkt_len - len);
	return 0;
}

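/*
 * Usage sketch (illustrative): the four data-room editing helpers. Prepend
 * grows into the headroom (e.g. to add a 14-byte Ethernet header), append
 * grows into the tailroom, adj and trim shrink the data from either end.
 *
 *	char *hdr = rte_pktmbuf_prepend(m, 14);    // NULL if no headroom
 *	char *pay = rte_pktmbuf_append(m, 64);     // NULL if no tailroom
 *	rte_pktmbuf_adj(m, 14);                    // strip 14 bytes at front
 *	rte_pktmbuf_trim(m, 4);                    // drop 4 bytes at tail
 */
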
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return m->nb_segs == 1;
}

const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}

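/*
 * Usage sketch (illustrative; struct rte_ether_hdr and struct rte_ipv4_hdr
 * come from rte_ether.h and rte_ip.h): read an IPv4 header that may
 * straddle a segment boundary. The returned pointer references the mbuf
 * data when it is contiguous, otherwise the stack buffer holds a copy.
 *
 *	struct rte_ipv4_hdr hdr_copy;
 *	const struct rte_ipv4_hdr *ip = rte_pktmbuf_read(m,
 *			sizeof(struct rte_ether_hdr), sizeof(hdr_copy),
 *			&hdr_copy);
 *	if (ip == NULL)
 *		return -EINVAL;    // packet shorter than requested range
 */
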
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}

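/*
 * Usage sketch (illustrative): build a two-segment packet, e.g. a prepared
 * header segment followed by a payload segment. On failure the caller
 * still owns both mbufs and must free them.
 *
 *	if (rte_pktmbuf_chain(hdr_mbuf, payload_mbuf) != 0) {
 *		rte_pktmbuf_free(hdr_mbuf);
 *		rte_pktmbuf_free(payload_mbuf);
 *		return -EOVERFLOW;
 *	}
 */
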
static __rte_always_inline uint64_t
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2, uint64_t unused)
{
	return il2 << RTE_MBUF_L2_LEN_OFS |
		il3 << RTE_MBUF_L3_LEN_OFS |
		il4 << RTE_MBUF_L4_LEN_OFS |
		tso << RTE_MBUF_TSO_SEGSZ_OFS |
		ol3 << RTE_MBUF_OUTL3_LEN_OFS |
		ol2 << RTE_MBUF_OUTL2_LEN_OFS |
		unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
}

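/*
 * Usage sketch (illustrative; the header struct types come from
 * rte_ether.h, rte_ip.h and rte_tcp.h): fill all tx_offload length fields
 * in one store when requesting TCP checksum offload for a plain IPv4/TCP
 * frame.
 *
 *	m->tx_offload = rte_mbuf_tx_offload(sizeof(struct rte_ether_hdr),
 *			sizeof(struct rte_ipv4_hdr),
 *			sizeof(struct rte_tcp_hdr), 0, 0, 0, 0);
 *	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM;
 */
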
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;

	/* Does the packet set any of the available offloads? */
	if (!(ol_flags & RTE_MBUF_F_TX_OFFLOAD_MASK))
		return 0;

	/* IP checksum can be computed only for an IPv4 packet */
	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && (ol_flags & RTE_MBUF_F_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG))
		if (!(ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
		    ((ol_flags & RTE_MBUF_F_TX_IPV4) &&
		     !(ol_flags & RTE_MBUF_F_TX_IP_CKSUM)))
			return -EINVAL;

	/* RTE_MBUF_F_TX_OUTER_IP_CKSUM set for a non outer IPv4 packet. */
	if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) &&
	    !(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}

int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);

static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;
	return __rte_pktmbuf_linearize(mbuf);
}

void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
	return m->hash.sched.queue_id;
}

static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
	return m->hash.sched.traffic_class;
}

static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
	return m->hash.sched.color;
}

static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
		uint8_t *traffic_class,
		uint8_t *color)
{
	struct rte_mbuf_sched sched = m->hash.sched;

	*queue_id = sched.queue_id;
	*traffic_class = sched.traffic_class;
	*color = sched.color;
}

static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
	m->hash.sched.queue_id = queue_id;
}

static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
	m->hash.sched.traffic_class = traffic_class;
}

static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
	m->hash.sched.color = color;
}

static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
		uint8_t traffic_class,
		uint8_t color)
{
	m->hash.sched = (struct rte_mbuf_sched){
		.queue_id = queue_id,
		.traffic_class = traffic_class,
		.color = color,
		.reserved = 0,
	};
}

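/*
 * Usage sketch (illustrative; the values are arbitrary and RTE_COLOR_GREEN
 * is defined in rte_meter.h): a classifier stage tags an mbuf for the
 * hierarchical scheduler in a single store, and a later stage reads the
 * tag back.
 *
 *	rte_mbuf_sched_set(m, 3, 1, RTE_COLOR_GREEN);
 *	...
 *	uint32_t q;
 *	uint8_t tc, color;
 *	rte_mbuf_sched_get(m, &q, &tc, &color);
 */
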
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */