DPDK  20.08.0
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf_core.h>

#ifdef __cplusplus
extern "C" {
#endif

const char *rte_get_rx_ol_flag_name(uint64_t mask);

int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

const char *rte_get_tx_ol_flag_name(uint64_t mask);

int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
        rte_prefetch0(&m->cacheline0);
}

static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
        rte_prefetch0(&m->cacheline1);
#else
        RTE_SET_USED(m);
#endif
}

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
        return mb->buf_iova + mb->data_off;
}

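/*
 * Usage sketch (illustrative, not part of this header): a PMD-style routine
 * programming a hypothetical TX descriptor with the IO address of the packet
 * data. "struct ex_desc" and "ex_fill_desc" are made-up names, not a DPDK API.
 */
struct ex_desc {
        uint64_t addr; /* bus/IO address the NIC will DMA from */
        uint16_t len;  /* number of bytes in this segment */
};

static inline void
ex_fill_desc(struct ex_desc *d, const struct rte_mbuf *m)
{
        /* IO address of the first data byte, usable for device DMA */
        d->addr = rte_mbuf_data_iova(m);
        d->len = m->data_len;
}
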
__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
        return rte_mbuf_data_iova(mb);
}

static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
        return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
        return rte_mbuf_data_iova_default(mb);
}

static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
        return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

__rte_experimental
static inline char *
rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
{
        return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
}

__rte_experimental
static inline char *
rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
{
        /* gcc complains about calling this experimental function even
         * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
         */
#ifdef ALLOW_EXPERIMENTAL_API
        return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
#else
        return NULL;
#endif
}

static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
#ifdef ALLOW_EXPERIMENTAL_API
        return rte_mbuf_buf_addr(md, md->pool);
#else
        char *buffer_addr;
        buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
        return buffer_addr;
#endif
}

__rte_experimental
static inline void *
rte_mbuf_to_priv(struct rte_mbuf *m)
{
        return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
}

struct rte_pktmbuf_pool_private {
        uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
        uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
        uint32_t flags;               /**< Reserved for future use. */
};

static inline uint32_t
rte_pktmbuf_priv_flags(struct rte_mempool *mp)
{
        struct rte_pktmbuf_pool_private *mbp_priv;

        mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
        return mbp_priv->flags;
}

#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)

#define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
        (rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)

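/*
 * Usage sketch (illustrative, not part of this header): deciding whether an
 * mbuf's external buffer may be detached. A pinned external buffer (from a
 * pool created with rte_pktmbuf_pool_create_extbuf()) lives and dies with its
 * mbuf and must not be detached. "ex_can_detach" is a made-up helper name.
 */
static inline int
ex_can_detach(struct rte_mbuf *m)
{
        return RTE_MBUF_HAS_EXTBUF(m) && !RTE_MBUF_HAS_PINNED_EXTBUF(m);
}
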
#ifdef RTE_LIBRTE_MBUF_DEBUG

#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
        return __atomic_load_n(&m->refcnt, __ATOMIC_RELAXED);
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
        __atomic_store_n(&m->refcnt, new_value, __ATOMIC_RELAXED);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        return __atomic_add_fetch(&m->refcnt, (uint16_t)value,
                        __ATOMIC_ACQ_REL);
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        /*
         * The atomic add is an expensive operation, so we don't want to
         * call it when we know we are the unique holder of this mbuf
         * (i.e. refcnt == 1). Otherwise, an atomic operation has to be
         * used because concurrent accesses on the reference counter can
         * occur.
         */
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                ++value;
                rte_mbuf_refcnt_set(m, (uint16_t)value);
                return (uint16_t)value;
        }

        return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        m->refcnt = (uint16_t)(m->refcnt + value);
        return m->refcnt;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
        return __rte_mbuf_refcnt_update(m, value);
}

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
        return m->refcnt;
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
        m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

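/*
 * Usage sketch (illustrative, not part of this header): keeping a packet
 * alive across two consumers. Every extra reference taken with
 * rte_mbuf_refcnt_update(m, 1) must be balanced by one rte_pktmbuf_free();
 * the buffer returns to its pool only when the count reaches zero.
 * "ex_take_extra_ref" is a made-up helper name.
 */
static inline void
ex_take_extra_ref(struct rte_mbuf *m)
{
        rte_mbuf_refcnt_update(m, 1);
        /* ... hand m to a second consumer; each of the two owners
         * eventually calls rte_pktmbuf_free(m). */
}
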
static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
        return __atomic_load_n(&shinfo->refcnt, __ATOMIC_RELAXED);
}

static inline void
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
        uint16_t new_value)
{
        __atomic_store_n(&shinfo->refcnt, new_value, __ATOMIC_RELAXED);
}

static inline uint16_t
rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
        int16_t value)
{
        if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
                ++value;
                rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
                return (uint16_t)value;
        }

        return __atomic_add_fetch(&shinfo->refcnt, (uint16_t)value,
                        __ATOMIC_ACQ_REL);
}

511 
513 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
514  if ((m) != NULL) \
515  rte_prefetch0(m); \
516 } while (0)
517 
518 
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

__rte_experimental
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
        const char **reason);

#define MBUF_RAW_ALLOC_CHECK(m) do {                    \
        RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);       \
        RTE_ASSERT((m)->next == NULL);                  \
        RTE_ASSERT((m)->nb_segs == 1);                  \
        __rte_mbuf_sanity_check(m, 0);                  \
} while (0)

static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;

        if (rte_mempool_get(mp, (void **)&m) < 0)
                return NULL;
        MBUF_RAW_ALLOC_CHECK(m);
        return m;
}

static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
        RTE_ASSERT(!RTE_MBUF_CLONED(m) &&
                  (!RTE_MBUF_HAS_EXTBUF(m) || RTE_MBUF_HAS_PINNED_EXTBUF(m)));
        RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
        RTE_ASSERT(m->next == NULL);
        RTE_ASSERT(m->nb_segs == 1);
        __rte_mbuf_sanity_check(m, 0);
        rte_mempool_put(m->pool, m);
}

void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
        void *m, unsigned i);

void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
        unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
        int socket_id);

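/*
 * Usage sketch (illustrative, not part of this header): creating a pool of
 * 8191 mbufs with the default buffer size, no per-mbuf private area, and a
 * 256-mbuf per-lcore cache. The pool name and sizes are arbitrary example
 * values; "ex_create_pool" is a made-up helper name.
 */
static inline struct rte_mempool *
ex_create_pool(void)
{
        return rte_pktmbuf_pool_create("ex_mbuf_pool", 8191, 256,
                        0, RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
}
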
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
        unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
        int socket_id, const char *ops_name);

struct rte_pktmbuf_extmem {
        void *buf_ptr;       /**< The virtual address of the data buffer. */
        rte_iova_t buf_iova; /**< The IO address of the data buffer. */
        size_t buf_len;      /**< External buffer length in bytes. */
        uint16_t elt_size;   /**< mbuf element size in bytes. */
};

__rte_experimental
struct rte_mempool *
rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
        unsigned int cache_size, uint16_t priv_size,
        uint16_t data_room_size, int socket_id,
        const struct rte_pktmbuf_extmem *ext_mem,
        unsigned int ext_num);

static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
        struct rte_pktmbuf_pool_private *mbp_priv;

        mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
        return mbp_priv->mbuf_data_room_size;
}

static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
        struct rte_pktmbuf_pool_private *mbp_priv;

        mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
        return mbp_priv->mbuf_priv_size;
}

static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
        m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
                        (uint16_t)m->buf_len);
}

#define MBUF_INVALID_PORT UINT16_MAX

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
        m->next = NULL;
        m->pkt_len = 0;
        m->tx_offload = 0;
        m->vlan_tci = 0;
        m->vlan_tci_outer = 0;
        m->nb_segs = 1;
        m->port = MBUF_INVALID_PORT;

        m->ol_flags &= EXT_ATTACHED_MBUF;
        m->packet_type = 0;
        rte_pktmbuf_reset_headroom(m);

        m->data_len = 0;
        __rte_mbuf_sanity_check(m, 1);
}

static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
        struct rte_mbuf *m;
        if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
                rte_pktmbuf_reset(m);
        return m;
}

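/*
 * Application-side usage sketch (illustrative, not part of this header):
 * allocating an mbuf and appending payload bytes to it; on any failure the
 * mbuf is released. "ex_build_packet" is a made-up helper name.
 */
static inline struct rte_mbuf *
ex_build_packet(struct rte_mempool *mp, const void *data, uint16_t len)
{
        struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
        char *dst;

        if (m == NULL)
                return NULL;
        dst = rte_pktmbuf_append(m, len);
        if (dst == NULL) {
                /* not enough tailroom for len bytes */
                rte_pktmbuf_free(m);
                return NULL;
        }
        memcpy(dst, data, len);
        return m;
}
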
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
        struct rte_mbuf **mbufs, unsigned count)
{
        unsigned idx = 0;
        int rc;

        rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
        if (unlikely(rc))
                return rc;

        /* To understand Duff's device and this loop-unwinding optimization,
         * see https://en.wikipedia.org/wiki/Duff's_device.
         * A while() loop is used rather than a do {} while() loop to avoid
         * an extra check in case count is zero.
         */
        switch (count % 4) {
        case 0:
                while (idx != count) {
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
        case 3:
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
        case 2:
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
        case 1:
                        MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
                        rte_pktmbuf_reset(mbufs[idx]);
                        idx++;
                        /* fall-through */
                }
        }
        return 0;
}

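/*
 * Application-side usage sketch (illustrative, not part of this header):
 * allocating a burst of mbufs in one mempool operation. On success all
 * "count" entries are initialized; on failure nothing was allocated, so
 * there is nothing to free. "ex_alloc_burst" is a made-up helper name.
 */
static inline int
ex_alloc_burst(struct rte_mempool *mp, struct rte_mbuf **burst,
        unsigned int count)
{
        if (rte_pktmbuf_alloc_bulk(mp, burst, count) != 0)
                return -1; /* pool exhausted */
        return 0;
}
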
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
        rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
        struct rte_mbuf_ext_shared_info *shinfo;
        void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
        void *addr;

        addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
                        sizeof(uintptr_t));
        if (addr <= buf_addr)
                return NULL;

        shinfo = (struct rte_mbuf_ext_shared_info *)addr;
        shinfo->free_cb = free_cb;
        shinfo->fcb_opaque = fcb_opaque;
        rte_mbuf_ext_refcnt_set(shinfo, 1);

        *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
        return shinfo;
}

static inline void
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
        rte_iova_t buf_iova, uint16_t buf_len,
        struct rte_mbuf_ext_shared_info *shinfo)
{
        /* mbuf should not be read-only */
        RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
        RTE_ASSERT(shinfo->free_cb != NULL);

        m->buf_addr = buf_addr;
        m->buf_iova = buf_iova;
        m->buf_len = buf_len;

        m->data_len = 0;
        m->data_off = 0;

        m->ol_flags |= EXT_ATTACHED_MBUF;
        m->shinfo = shinfo;
}

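/*
 * Application-side usage sketch (illustrative, not part of this header):
 * attaching a user-allocated buffer to an mbuf. The shared-info area is
 * carved from the tail of the buffer by the init helper, which shrinks
 * buf_len accordingly. "ex_free_cb" and "ex_attach" are made-up names, and
 * the sketch assumes <stdlib.h> is available for free().
 */
static void
ex_free_cb(void *addr, void *opaque)
{
        /* invoked once the last reference to the buffer is gone */
        free(addr);
        RTE_SET_USED(opaque);
}

static inline int
ex_attach(struct rte_mbuf *m, void *buf, uint16_t buf_len)
{
        struct rte_mbuf_ext_shared_info *shinfo;

        shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
                        ex_free_cb, NULL);
        if (shinfo == NULL)
                return -1; /* buffer too small for the shared info */
        rte_pktmbuf_attach_extbuf(m, buf, rte_mem_virt2iova(buf),
                        buf_len, shinfo);
        return 0;
}
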
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)

static inline void
rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
        memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
}

/* internal */
static inline void
__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
        mdst->port = msrc->port;
        mdst->vlan_tci = msrc->vlan_tci;
        mdst->vlan_tci_outer = msrc->vlan_tci_outer;
        mdst->tx_offload = msrc->tx_offload;
        mdst->hash = msrc->hash;
        mdst->packet_type = msrc->packet_type;
        mdst->timestamp = msrc->timestamp;
        rte_mbuf_dynfield_copy(mdst, msrc);
}

static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
        RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
            rte_mbuf_refcnt_read(mi) == 1);

        if (RTE_MBUF_HAS_EXTBUF(m)) {
                rte_mbuf_ext_refcnt_update(m->shinfo, 1);
                mi->ol_flags = m->ol_flags;
                mi->shinfo = m->shinfo;
        } else {
                /* if m is not direct, get the mbuf that embeds the data */
                rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
                mi->priv_size = m->priv_size;
                mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
        }

        __rte_pktmbuf_copy_hdr(mi, m);

        mi->data_off = m->data_off;
        mi->data_len = m->data_len;
        mi->buf_iova = m->buf_iova;
        mi->buf_addr = m->buf_addr;
        mi->buf_len = m->buf_len;

        mi->next = NULL;
        mi->pkt_len = mi->data_len;
        mi->nb_segs = 1;

        __rte_mbuf_sanity_check(mi, 1);
        __rte_mbuf_sanity_check(m, 0);
}

static inline void
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
{
        RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
        RTE_ASSERT(m->shinfo != NULL);

        if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
                m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
}

static inline void
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
{
        struct rte_mbuf *md;

        RTE_ASSERT(RTE_MBUF_CLONED(m));

        md = rte_mbuf_from_indirect(m);

        if (rte_mbuf_refcnt_update(md, -1) == 0) {
                md->next = NULL;
                md->nb_segs = 1;
                rte_mbuf_refcnt_set(md, 1);
                rte_mbuf_raw_free(md);
        }
}

static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        uint16_t priv_size;

        if (RTE_MBUF_HAS_EXTBUF(m)) {
                /*
                 * The mbuf has an external attached buffer; we have to check
                 * the type of the memory pool the mbuf was allocated from to
                 * detect a pinned external buffer.
                 */
                uint32_t flags = rte_pktmbuf_priv_flags(mp);

                if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
                        /*
                         * A pinned external buffer must not be detached
                         * from its backing mbuf, just exit.
                         */
                        return;
                }
                __rte_pktmbuf_free_extbuf(m);
        } else {
                __rte_pktmbuf_free_direct(m);
        }
        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
}

static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
{
        struct rte_mbuf_ext_shared_info *shinfo;

        /* Clear flags, mbuf is being freed. */
        m->ol_flags = EXT_ATTACHED_MBUF;
        shinfo = m->shinfo;

        /* Optimize for performance - do not dec/reinit */
        if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1))
                return 0;

        /*
         * Use the add primitive directly to avoid duplicating the
         * comparison with one.
         */
        if (likely(__atomic_add_fetch(&shinfo->refcnt, (uint16_t)-1,
                        __ATOMIC_ACQ_REL)))
                return 1;

        /* Reinitialize counter before mbuf freeing. */
        rte_mbuf_ext_refcnt_set(shinfo, 1);
        return 0;
}

static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 0);

        if (likely(rte_mbuf_refcnt_read(m) == 1)) {

                if (!RTE_MBUF_DIRECT(m)) {
                        rte_pktmbuf_detach(m);
                        if (RTE_MBUF_HAS_EXTBUF(m) &&
                            RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
                            __rte_pktmbuf_pinned_extbuf_decref(m))
                                return NULL;
                }

                if (m->next != NULL) {
                        m->next = NULL;
                        m->nb_segs = 1;
                }

                return m;

        } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

                if (!RTE_MBUF_DIRECT(m)) {
                        rte_pktmbuf_detach(m);
                        if (RTE_MBUF_HAS_EXTBUF(m) &&
                            RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
                            __rte_pktmbuf_pinned_extbuf_decref(m))
                                return NULL;
                }

                if (m->next != NULL) {
                        m->next = NULL;
                        m->nb_segs = 1;
                }
                rte_mbuf_refcnt_set(m, 1);

                return m;
        }
        return NULL;
}

static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
        m = rte_pktmbuf_prefree_seg(m);
        if (likely(m != NULL))
                rte_mbuf_raw_free(m);
}

static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
        struct rte_mbuf *m_next;

        if (m != NULL)
                __rte_mbuf_sanity_check(m, 1);

        while (m != NULL) {
                m_next = m->next;
                rte_pktmbuf_free_seg(m);
                m = m_next;
        }
}

__rte_experimental
void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);

struct rte_mbuf *
rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);

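/*
 * Application-side usage sketch (illustrative, not part of this header):
 * zero-copy duplication. The clone's segments are indirect mbufs taken from
 * "clone_pool" that reference md's data; the payload itself is not copied,
 * and md's buffers stay alive until every clone is freed. "ex_mirror" is a
 * made-up helper name.
 */
static inline struct rte_mbuf *
ex_mirror(struct rte_mbuf *md, struct rte_mempool *clone_pool)
{
        return rte_pktmbuf_clone(md, clone_pool);
}
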
__rte_experimental
struct rte_mbuf *
rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
        uint32_t offset, uint32_t length);

static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
        __rte_mbuf_sanity_check(m, 1);

        do {
                rte_mbuf_refcnt_update(m, v);
        } while ((m = m->next) != NULL);
}

static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 0);
        return m->data_off;
}

static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 0);
        return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
                        m->data_len);
}

static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 1);
        while (m->next != NULL)
                m = m->next;
        return m;
}

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
        rte_pktmbuf_iova_offset(m, o)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

#define rte_pktmbuf_data_len(m) ((m)->data_len)

static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
        uint16_t len)
{
        __rte_mbuf_sanity_check(m, 1);

        if (unlikely(len > rte_pktmbuf_headroom(m)))
                return NULL;

        /* NB: elaborating the subtraction like this instead of using
         * -= allows us to ensure the result type is uint16_t,
         * avoiding compiler warnings on gcc 8.1 at least */
        m->data_off = (uint16_t)(m->data_off - len);
        m->data_len = (uint16_t)(m->data_len + len);
        m->pkt_len = (m->pkt_len + len);

        return (char *)m->buf_addr + m->data_off;
}

static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
        void *tail;
        struct rte_mbuf *m_last;

        __rte_mbuf_sanity_check(m, 1);

        m_last = rte_pktmbuf_lastseg(m);
        if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
                return NULL;

        tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
        m_last->data_len = (uint16_t)(m_last->data_len + len);
        m->pkt_len = (m->pkt_len + len);
        return (char *)tail;
}

static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
        __rte_mbuf_sanity_check(m, 1);

        if (unlikely(len > m->data_len))
                return NULL;

        /* NB: elaborating the addition like this instead of using
         * += allows us to ensure the result type is uint16_t,
         * avoiding compiler warnings on gcc 8.1 at least */
        m->data_len = (uint16_t)(m->data_len - len);
        m->data_off = (uint16_t)(m->data_off + len);
        m->pkt_len = (m->pkt_len - len);
        return (char *)m->buf_addr + m->data_off;
}

static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
        struct rte_mbuf *m_last;

        __rte_mbuf_sanity_check(m, 1);

        m_last = rte_pktmbuf_lastseg(m);
        if (unlikely(len > m_last->data_len))
                return -1;

        m_last->data_len = (uint16_t)(m_last->data_len - len);
        m->pkt_len = (m->pkt_len - len);
        return 0;
}

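/*
 * Application-side usage sketch (illustrative, not part of this header):
 * stripping an outer header with rte_pktmbuf_adj() and dropping a trailer
 * with rte_pktmbuf_trim(). Both only adjust offsets and lengths; no payload
 * bytes are moved. "ex_strip" is a made-up helper name.
 */
static inline int
ex_strip(struct rte_mbuf *m, uint16_t hdr_len, uint16_t trailer_len)
{
        if (rte_pktmbuf_adj(m, hdr_len) == NULL)
                return -1; /* header longer than first segment's data */
        return rte_pktmbuf_trim(m, trailer_len);
}
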
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
        __rte_mbuf_sanity_check(m, 1);
        return m->nb_segs == 1;
}

const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
        uint32_t len, void *buf);

static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
        uint32_t off, uint32_t len, void *buf)
{
        if (likely(off + len <= rte_pktmbuf_data_len(m)))
                return rte_pktmbuf_mtod_offset(m, char *, off);
        else
                return __rte_pktmbuf_read(m, off, len, buf);
}

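/*
 * Application-side usage sketch (illustrative, not part of this header):
 * reading the first bytes of a possibly segmented packet. If the range is
 * contiguous in the first segment, the returned pointer refers directly to
 * the mbuf data; otherwise the bytes are copied into "scratch" and a pointer
 * to it is returned. "ex_peek_hdr" is a made-up helper name.
 */
static inline const void *
ex_peek_hdr(const struct rte_mbuf *m, void *scratch, uint32_t len)
{
        /* scratch must be at least len bytes */
        return rte_pktmbuf_read(m, 0, len, scratch);
}
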
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
        struct rte_mbuf *cur_tail;

        /* Check for number-of-segments-overflow */
        if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
                return -EOVERFLOW;

        /* Chain 'tail' onto the old tail */
        cur_tail = rte_pktmbuf_lastseg(head);
        cur_tail->next = tail;

        /* accumulate number of segments and total length.
         * NB: elaborating the addition like this instead of using
         * += allows us to ensure the result type is uint16_t,
         * avoiding compiler warnings on gcc 8.1 at least */
        head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
        head->pkt_len += tail->pkt_len;

        /* pkt_len is only set in the head */
        tail->pkt_len = tail->data_len;

        return 0;
}

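/*
 * Application-side usage sketch (illustrative, not part of this header):
 * joining two packets into one segment chain. On -EOVERFLOW the two mbufs
 * remain independent; this sketch simply frees both and reports failure by
 * returning NULL. "ex_join" is a made-up helper name.
 */
static inline struct rte_mbuf *
ex_join(struct rte_mbuf *a, struct rte_mbuf *b)
{
        if (rte_pktmbuf_chain(a, b) != 0) {
                rte_pktmbuf_free(a);
                rte_pktmbuf_free(b);
                return NULL;
        }
        return a;
}
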
/*
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice.
 *
 * For the given input values, generate a raw tx_offload value.
 * Note that it is the caller's responsibility to make sure that the input
 * parameters don't exceed the maximum bit-field values.
 * @param il2
 *   l2_len value.
 * @param il3
 *   l3_len value.
 * @param il4
 *   l4_len value.
 * @param tso
 *   tso_segsz value.
 * @param ol3
 *   outer_l3_len value.
 * @param ol2
 *   outer_l2_len value.
 * @param unused
 *   unused value.
 * @return
 *   raw tx_offload value.
 */
static __rte_always_inline uint64_t
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
        uint64_t ol3, uint64_t ol2, uint64_t unused)
{
        return il2 << RTE_MBUF_L2_LEN_OFS |
                il3 << RTE_MBUF_L3_LEN_OFS |
                il4 << RTE_MBUF_L4_LEN_OFS |
                tso << RTE_MBUF_TSO_SEGSZ_OFS |
                ol3 << RTE_MBUF_OUTL3_LEN_OFS |
                ol2 << RTE_MBUF_OUTL2_LEN_OFS |
                unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
}

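/*
 * Application-side usage sketch (illustrative, not part of this header):
 * requesting IP and TCP checksum offload for a plain IPv4/TCP frame. The
 * header lengths (14/20/20) are the usual Ethernet/IPv4/TCP sizes without
 * options; adjust them for VLAN tags or IP/TCP options. "ex_request_tcp_cksum"
 * is a made-up helper name.
 */
static inline void
ex_request_tcp_cksum(struct rte_mbuf *m)
{
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
        m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 0, 0, 0, 0);
}
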
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
        uint64_t ol_flags = m->ol_flags;

        /* Does the packet set any of the available offloads? */
        if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
                return 0;

        /* IP checksum can be counted only for an IPv4 packet */
        if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
                return -EINVAL;

        /* IP type not set when required */
        if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
                if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
                        return -EINVAL;

        /* Check requirements for TSO packet */
        if (ol_flags & PKT_TX_TCP_SEG)
                if ((m->tso_segsz == 0) ||
                                ((ol_flags & PKT_TX_IPV4) &&
                                !(ol_flags & PKT_TX_IP_CKSUM)))
                        return -EINVAL;

        /* PKT_TX_OUTER_IP_CKSUM set for a non outer-IPv4 packet. */
        if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
                        !(ol_flags & PKT_TX_OUTER_IPV4))
                return -EINVAL;

        return 0;
}

int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);

static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
        if (rte_pktmbuf_is_contiguous(mbuf))
                return 0;
        return __rte_pktmbuf_linearize(mbuf);
}

void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
        return m->hash.sched.queue_id;
}

static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
        return m->hash.sched.traffic_class;
}

static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
        return m->hash.sched.color;
}

static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
        uint8_t *traffic_class,
        uint8_t *color)
{
        struct rte_mbuf_sched sched = m->hash.sched;

        *queue_id = sched.queue_id;
        *traffic_class = sched.traffic_class;
        *color = sched.color;
}

static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
        m->hash.sched.queue_id = queue_id;
}

static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
        m->hash.sched.traffic_class = traffic_class;
}

static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
        m->hash.sched.color = color;
}

static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
        uint8_t traffic_class,
        uint8_t color)
{
        m->hash.sched = (struct rte_mbuf_sched){
                .queue_id = queue_id,
                .traffic_class = traffic_class,
                .color = color,
                .reserved = 0,
        };
}

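/*
 * Application-side usage sketch (illustrative, not part of this header):
 * classifying a packet for the hierarchical scheduler. Writing all three
 * fields at once with rte_mbuf_sched_set() issues a single store to
 * m->hash.sched instead of three read-modify-writes. "ex_classify" is a
 * made-up helper name and the class/color values are arbitrary examples.
 */
static inline void
ex_classify(struct rte_mbuf *m, uint32_t queue_id)
{
        rte_mbuf_sched_set(m, queue_id, 0 /* traffic class */,
                        0 /* color, e.g. green */);
}
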
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */