DPDK 19.11.14
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf_core.h>

#ifdef __cplusplus
extern "C" {
#endif

const char *rte_get_rx_ol_flag_name(uint64_t mask);

int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

const char *rte_get_tx_ol_flag_name(uint64_t mask);

int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(&m->cacheline0);
}

static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}

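/*
 * Usage sketch (editor's illustration, not part of the DPDK API): a typical
 * RX loop prefetches both halves of the next mbuf before touching the
 * current one, hiding cache-miss latency. The "example_" names below are
 * hypothetical.
 */
static inline void
example_prefetch_burst(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		if (i + 1 < nb_pkts) {
			/* Warm the first two cache lines of the next mbuf. */
			rte_mbuf_prefetch_part1(pkts[i + 1]);
			rte_mbuf_prefetch_part2(pkts[i + 1]);
		}
		/* ... process pkts[i] here ... */
	}
}
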
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova(mb);
}

static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova_default(mb);
}

static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

__rte_experimental
static inline char *
rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
{
	return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
}

__rte_experimental
static inline char *
rte_mbuf_data_addr_default(__rte_unused struct rte_mbuf *mb)
{
	/* gcc complains about calling this experimental function even
	 * when not using it. Hide it with ALLOW_EXPERIMENTAL_API.
	 */
#ifdef ALLOW_EXPERIMENTAL_API
	return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
#else
	return NULL;
#endif
}

static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
#ifdef ALLOW_EXPERIMENTAL_API
	return rte_mbuf_buf_addr(md, md->pool);
#else
	char *buffer_addr;
	buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
	return buffer_addr;
#endif
}

__rte_experimental
static inline void *
rte_mbuf_to_priv(struct rte_mbuf *m)
{
	return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
}

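/*
 * Application-side usage sketch (editor's illustration): a pool created with
 * a non-zero priv_size gives each mbuf a per-packet scratch area right after
 * the struct. "struct example_meta" is a hypothetical application type, and
 * rte_mbuf_to_priv() is experimental (requires ALLOW_EXPERIMENTAL_API).
 */
struct example_meta {
	uint32_t flow_id; /* application-defined per-packet state */
};

static inline void
example_tag_flow(struct rte_mbuf *m, uint32_t flow_id)
{
	struct example_meta *meta = rte_mbuf_to_priv(m);

	/* Valid only if the pool's priv_size >= sizeof(struct example_meta). */
	meta->flow_id = flow_id;
}
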
struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
	uint32_t flags;               /**< Reserved for future use. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		++value;
		rte_mbuf_refcnt_set(m, (uint16_t)value);
		return (uint16_t)value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
	return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
}

static inline void
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
	uint16_t new_value)
{
	rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
}

static inline uint16_t
rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
	int16_t value)
{
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
		++value;
		rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
		return (uint16_t)value;
	}

	return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
}

#define RTE_MBUF_PREFETCH_TO_FREE(m) do {	\
	if ((m) != NULL)			\
		rte_prefetch0(m);		\
} while (0)


void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

__rte_experimental
int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
		   const char **reason);

#define MBUF_RAW_ALLOC_CHECK(m) do {				\
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);		\
	RTE_ASSERT((m)->next == NULL);				\
	RTE_ASSERT((m)->nb_segs == 1);				\
	__rte_mbuf_sanity_check(m, 0);				\
} while (0)

static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if (rte_mempool_get(mp, (void **)&m) < 0)
		return NULL;
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(m));
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	__rte_mbuf_sanity_check(m, 0);
	rte_mempool_put(m->pool, m);
}

void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		      void *m, unsigned i);

void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

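/*
 * Application-side usage sketch (editor's illustration): creating a pool
 * sized for standard Ethernet frames at initialization time. The pool name
 * and counts are arbitrary example choices; rte_socket_id() comes from
 * <rte_lcore.h>, which the application is assumed to include.
 */
static inline struct rte_mempool *
example_create_pktmbuf_pool(void)
{
	/* 8191 mbufs, per-core cache of 256, no private area,
	 * default data room (headroom + 2 KB). */
	return rte_pktmbuf_pool_create("example_pool", 8191, 256, 0,
				       RTE_MBUF_DEFAULT_BUF_SIZE,
				       rte_socket_id());
}
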
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
					(uint16_t)m->buf_len);
}

#define MBUF_INVALID_PORT UINT16_MAX

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;

	m->ol_flags = 0;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}

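/*
 * Application-side usage sketch (editor's illustration; assumes the whole
 * header plus <string.h> are visible): allocate an mbuf and copy a payload
 * into its data room. rte_pktmbuf_append() and rte_pktmbuf_free() are
 * defined further below in this file.
 */
static inline struct rte_mbuf *
example_build_frame(struct rte_mempool *mp, const void *data, uint16_t len)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m == NULL)
		return NULL;
	if (rte_pktmbuf_append(m, len) == NULL) {
		rte_pktmbuf_free(m); /* not enough tailroom */
		return NULL;
	}
	memcpy(rte_pktmbuf_mtod(m, void *), data, len);
	return m;
}
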
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand Duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here a while () loop is used rather than a do {} while () loop to
	 * avoid an extra check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}

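/*
 * Application-side usage sketch (editor's illustration): bulk allocation is
 * all-or-nothing, so a single return-code check covers the whole burst.
 * EXAMPLE_BURST is a hypothetical constant.
 */
#define EXAMPLE_BURST 32

static inline int
example_alloc_burst(struct rte_mempool *mp,
	struct rte_mbuf *pkts[EXAMPLE_BURST])
{
	if (rte_pktmbuf_alloc_bulk(mp, pkts, EXAMPLE_BURST) != 0)
		return -1; /* pool exhausted: no mbufs were taken */
	return 0;
}
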
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
	rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
	void *addr;

	addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
				   sizeof(uintptr_t));
	if (addr <= buf_addr)
		return NULL;

	shinfo = (struct rte_mbuf_ext_shared_info *)addr;
	shinfo->free_cb = free_cb;
	shinfo->fcb_opaque = fcb_opaque;
	rte_mbuf_ext_refcnt_set(shinfo, 1);

	*buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
	return shinfo;
}

static inline void
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
	rte_iova_t buf_iova, uint16_t buf_len,
	struct rte_mbuf_ext_shared_info *shinfo)
{
	/* mbuf should not be read-only */
	RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(shinfo->free_cb != NULL);

	m->buf_addr = buf_addr;
	m->buf_iova = buf_iova;
	m->buf_len = buf_len;

	m->data_len = 0;
	m->data_off = 0;

	m->ol_flags |= EXT_ATTACHED_MBUF;
	m->shinfo = shinfo;
}

#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)

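/*
 * Application-side usage sketch (editor's illustration): wire an external,
 * application-owned buffer into an mbuf. The free callback is a placeholder;
 * a real application would release or recycle the buffer there, and obtain
 * buf_iova from its own memory-registration scheme.
 */
static void
example_extbuf_free(void *addr, void *opaque)
{
	(void)addr;
	(void)opaque; /* buffer lifetime is managed by the application */
}

static inline int
example_attach_external(struct rte_mbuf *m, void *buf, uint16_t buf_len,
	rte_iova_t buf_iova)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	/* Carve the shared info out of the tail of the buffer itself;
	 * buf_len is shrunk accordingly by the helper. */
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
			example_extbuf_free, NULL);
	if (shinfo == NULL)
		return -1; /* buffer too small to hold the shared info */

	rte_pktmbuf_attach_extbuf(m, buf, buf_iova, buf_len, shinfo);
	return 0;
}
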
static inline void
rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
	memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
}

/* internal */
static inline void
__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
	mdst->port = msrc->port;
	mdst->vlan_tci = msrc->vlan_tci;
	mdst->vlan_tci_outer = msrc->vlan_tci_outer;
	mdst->tx_offload = msrc->tx_offload;
	mdst->hash = msrc->hash;
	mdst->packet_type = msrc->packet_type;
	mdst->timestamp = msrc->timestamp;
	rte_mbuf_dynfield_copy(mdst, msrc);
}

static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		rte_mbuf_ext_refcnt_update(m->shinfo, 1);
		mi->ol_flags = m->ol_flags;
		mi->shinfo = m->shinfo;
	} else {
		/* if m is not direct, get the mbuf that embeds the data */
		rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
		mi->priv_size = m->priv_size;
		mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	}

	__rte_pktmbuf_copy_hdr(mi, m);

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->buf_iova = m->buf_iova;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 0);
}

static inline void
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
	RTE_ASSERT(m->shinfo != NULL);

	if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
		m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
}

static inline void
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_CLONED(m));

	md = rte_mbuf_from_indirect(m);

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

1124 
1138 static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1139 {
1140  struct rte_mempool *mp = m->pool;
1141  uint32_t mbuf_size, buf_len;
1142  uint16_t priv_size;
1143 
1144  if (RTE_MBUF_HAS_EXTBUF(m))
1145  __rte_pktmbuf_free_extbuf(m);
1146  else
1147  __rte_pktmbuf_free_direct(m);
1148 
1149  priv_size = rte_pktmbuf_priv_size(mp);
1150  mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1151  buf_len = rte_pktmbuf_data_room_size(mp);
1152 
1153  m->priv_size = priv_size;
1154  m->buf_addr = (char *)m + mbuf_size;
1155  m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1156  m->buf_len = (uint16_t)buf_len;
1158  m->data_len = 0;
1159  m->ol_flags = 0;
1160 }
1161 
1176 static __rte_always_inline struct rte_mbuf *
1178 {
1180 
1181  if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1182 
1183  if (!RTE_MBUF_DIRECT(m))
1184  rte_pktmbuf_detach(m);
1185 
1186  if (m->next != NULL) {
1187  m->next = NULL;
1188  m->nb_segs = 1;
1189  }
1190 
1191  return m;
1192 
1193  } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1194 
1195  if (!RTE_MBUF_DIRECT(m))
1196  rte_pktmbuf_detach(m);
1197 
1198  if (m->next != NULL) {
1199  m->next = NULL;
1200  m->nb_segs = 1;
1201  }
1202  rte_mbuf_refcnt_set(m, 1);
1203 
1204  return m;
1205  }
1206  return NULL;
1207 }
1208 
1218 static __rte_always_inline void
1220 {
1221  m = rte_pktmbuf_prefree_seg(m);
1222  if (likely(m != NULL))
1223  rte_mbuf_raw_free(m);
1224 }
1225 
1235 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1236 {
1237  struct rte_mbuf *m_next;
1238 
1239  if (m != NULL)
1241 
1242  while (m != NULL) {
1243  m_next = m->next;
1245  m = m_next;
1246  }
1247 }
__rte_experimental
void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);

struct rte_mbuf *
rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);

__rte_experimental
struct rte_mbuf *
rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
		 uint32_t offset, uint32_t length);

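/*
 * Application-side usage sketch (editor's illustration): rte_pktmbuf_clone()
 * shares the original data read-only via indirect mbufs, while
 * rte_pktmbuf_copy() duplicates the bytes so they can be modified freely.
 * Passing UINT32_MAX as the length asks rte_pktmbuf_copy() to copy through
 * the end of the packet (the length is clamped internally).
 */
static inline struct rte_mbuf *
example_duplicate(struct rte_mbuf *pkt, struct rte_mempool *mp, int writable)
{
	if (writable)
		return rte_pktmbuf_copy(pkt, mp, 0, UINT32_MAX); /* deep copy */
	return rte_pktmbuf_clone(pkt, mp); /* zero-copy reference */
}
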
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	while (m->next != NULL)
		m = m->next;
	return m;
}

1374 
1375 /* deprecated */
1376 #define rte_pktmbuf_mtophys_offset(m, o) \
1377  rte_pktmbuf_iova_offset(m, o)
1378 
1379 /* deprecated */
1380 #define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
1381 
1390 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1391 
1400 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1401 
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	/* NB: elaborating the subtraction like this instead of using
	 * -= allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_off = (uint16_t)(m->data_off - len);
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len = (m->pkt_len + len);
	return (char *)tail;
}

1466 
1481 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1482 {
1484 
1485  if (unlikely(len > m->data_len))
1486  return NULL;
1487 
1488  /* NB: elaborating the addition like this instead of using
1489  * += allows us to ensure the result type is uint16_t
1490  * avoiding compiler warnings on gcc 8.1 at least */
1491  m->data_len = (uint16_t)(m->data_len - len);
1492  m->data_off = (uint16_t)(m->data_off + len);
1493  m->pkt_len = (m->pkt_len - len);
1494  return (char *)m->buf_addr + m->data_off;
1495 }
1496 
1511 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
1512 {
1513  struct rte_mbuf *m_last;
1514 
1516 
1517  m_last = rte_pktmbuf_lastseg(m);
1518  if (unlikely(len > m_last->data_len))
1519  return -1;
1520 
1521  m_last->data_len = (uint16_t)(m_last->data_len - len);
1522  m->pkt_len = (m->pkt_len - len);
1523  return 0;
1524 }
1525 
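/*
 * Application-side usage sketch (editor's illustration; assumes <string.h>):
 * pushing and popping an encapsulation header. The 8-byte size stands in
 * for a real tunnel header.
 */
static inline int
example_push_pop_header(struct rte_mbuf *m)
{
	char *hdr = rte_pktmbuf_prepend(m, 8); /* consume 8 B of headroom */

	if (hdr == NULL)
		return -1; /* no headroom left */
	memset(hdr, 0, 8); /* fill in the new header */

	/* ... later, strip it again: */
	if (rte_pktmbuf_adj(m, 8) == NULL)
		return -1; /* packet shorter than 8 B */
	return 0;
}
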
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return m->nb_segs == 1;
}

const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}

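/*
 * Application-side usage sketch (editor's illustration): peek at a 4-byte
 * field at a given offset without caring whether the packet is segmented.
 * The returned pointer aims either into the mbuf or into the stack buffer.
 */
static inline int
example_peek_u32(const struct rte_mbuf *m, uint32_t off, uint32_t *out)
{
	uint32_t tmp;
	const uint32_t *p = rte_pktmbuf_read(m, off, sizeof(tmp), &tmp);

	if (p == NULL)
		return -1; /* offset + length exceeds the packet */
	*out = *p;
	return 0;
}
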
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}

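/*
 * Application-side usage sketch (editor's illustration): build a two-segment
 * packet from separately allocated header and payload mbufs. On failure the
 * caller still owns both mbufs.
 */
static inline int
example_join(struct rte_mbuf *hdr, struct rte_mbuf *payload)
{
	int rc = rte_pktmbuf_chain(hdr, payload);

	if (rc == -EOVERFLOW)
		return rc; /* would exceed RTE_MBUF_MAX_NB_SEGS segments */
	/* hdr->pkt_len now covers both segments; a single
	 * rte_pktmbuf_free(hdr) releases the whole chain. */
	return rc;
}
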
/*
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice.
 *
 * For given input values generate raw tx_offload value.
 * Note that it is the caller's responsibility to make sure that input
 * parameters don't exceed maximum bit-field values.
 * @param il2
 *   l2_len value.
 * @param il3
 *   l3_len value.
 * @param il4
 *   l4_len value.
 * @param tso
 *   tso_segsz value.
 * @param ol3
 *   outer_l3_len value.
 * @param ol2
 *   outer_l2_len value.
 * @param unused
 *   unused value.
 * @return
 *   raw tx_offload value.
 */
static __rte_always_inline uint64_t
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2, uint64_t unused)
{
	return il2 << RTE_MBUF_L2_LEN_OFS |
		il3 << RTE_MBUF_L3_LEN_OFS |
		il4 << RTE_MBUF_L4_LEN_OFS |
		tso << RTE_MBUF_TSO_SEGSZ_OFS |
		ol3 << RTE_MBUF_OUTL3_LEN_OFS |
		ol2 << RTE_MBUF_OUTL2_LEN_OFS |
		unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
}

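/*
 * Application-side usage sketch (editor's illustration): set the metadata
 * for a TCP segmentation offload (TSO) transmit in one store. The header
 * sizes are typical Ethernet/IPv4/TCP values chosen for the example.
 */
static inline void
example_request_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
	/* l2=14 (Ethernet), l3=20 (IPv4), l4=20 (TCP), no outer headers */
	m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, mss, 0, 0, 0);
}
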
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;

	/* Does the packet set any of the available offloads? */
	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
		return 0;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & PKT_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
				((ol_flags & PKT_TX_IPV4) &&
				 !(ol_flags & PKT_TX_IP_CKSUM)))
			return -EINVAL;

	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
			!(ol_flags & PKT_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}

int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);

static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;
	return __rte_pktmbuf_linearize(mbuf);
}

void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
	return m->hash.sched.queue_id;
}

static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
	return m->hash.sched.traffic_class;
}

static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
	return m->hash.sched.color;
}

static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
	uint8_t *traffic_class,
	uint8_t *color)
{
	struct rte_mbuf_sched sched = m->hash.sched;

	*queue_id = sched.queue_id;
	*traffic_class = sched.traffic_class;
	*color = sched.color;
}

static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
	m->hash.sched.queue_id = queue_id;
}

static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
	m->hash.sched.traffic_class = traffic_class;
}

static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
	m->hash.sched.color = color;
}

static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
	uint8_t traffic_class,
	uint8_t color)
{
	m->hash.sched = (struct rte_mbuf_sched){
		.queue_id = queue_id,
		.traffic_class = traffic_class,
		.color = color,
		.reserved = 0,
	};
}

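/*
 * Application-side usage sketch (editor's illustration): classify a packet
 * for the hierarchical scheduler, then read the fields back. The queue,
 * class and color values are arbitrary.
 */
static inline void
example_sched_roundtrip(struct rte_mbuf *m)
{
	uint32_t queue_id;
	uint8_t traffic_class, color;

	/* One combined store beats three separate field writes. */
	rte_mbuf_sched_set(m, 7, 2, 0);
	rte_mbuf_sched_get(m, &queue_id, &traffic_class, &color);
}
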
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */