DPDK 19.02.0
rte_mempool.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects, identified by
 * its name. It uses a ring (or a driver-provided mechanism) to store
 * free objects, with optional per-lcore caches to reduce contention.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#ifdef __cplusplus
extern "C" {
#endif
#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
	/** Successful allocation number of contiguous blocks. */
	uint64_t get_success_blks;
	/** Failed allocation number of contiguous blocks. */
	uint64_t get_fail_blks;
} __rte_cache_aligned;
#endif

/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	uint32_t size;        /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;         /**< Current cache count */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};

#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)

#define MEMPOOL_PG_NUM_DEFAULT 1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header structure
 *
 * Each object stored in mempools is prefixed by this header structure.
 * It allows retrieving the mempool pointer from the object and iterating
 * on all objects attached to a mempool. When debug is enabled, a cookie
 * is also added in this structure, preventing corruptions and
 * double-frees.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	RTE_STD_C11
	union {
		rte_iova_t iova;         /**< IO address of the object. */
		phys_addr_t physaddr;    /**< deprecated - Physical address. */
	};
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in mempools is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
	uint64_t cookie;
};

#endif
/**
 * A list of memory where objects are stored
 */
STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored are registered to the
 * mempool using memhdr structures.
 */
struct rte_mempool_memhdr {
	STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the chunk */
	void *addr;              /**< Virtual address of the chunk */
	RTE_STD_C11
	union {
		rte_iova_t iova;         /**< IO address of the chunk */
		phys_addr_t phys_addr;   /**< Physical address of the chunk */
	};
	size_t len;              /**< length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
	void *opaque;            /**< Argument passed to the free callback */
};

/**
 * Additional information about the mempool
 */
struct rte_mempool_info {
	/** Number of objects in the contiguous block */
	unsigned int contig_block_size;
};
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	/*
	 * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
	 * compatibility requirements, it could be changed to
	 * RTE_MEMPOOL_NAMESIZE next time the ABI changes
	 */
	char name[RTE_MEMZONE_NAMESIZE]; /**< Name of mempool. */
	RTE_STD_C11
	union {
		void *pool_data;         /**< Ring or pool to store objects. */
		uint64_t pool_id;        /**< External mempool identifier. */
	};
	void *pool_config;               /**< optional args for ops alloc. */
	const struct rte_memzone *mz;    /**< Memzone where pool is alloc'd. */
	unsigned int flags;              /**< Flags of the mempool. */
	int socket_id;                   /**< Socket id passed at create. */
	uint32_t size;                   /**< Max size of the mempool. */
	uint32_t cache_size;
	/**< Size of per-lcore default local cache. */

	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of header (before elt). */
	uint32_t trailer_size;           /**< Size of trailer (after elt). */

	unsigned private_data_size;      /**< Size of private data. */
	/**
	 * Index into rte_mempool_ops_table array of mempool ops
	 * structs, which contain callback function pointers.
	 * We're using an index here rather than pointers to the callbacks
	 * to facilitate any secondary processes that may want to use
	 * this mempool.
	 */
	int32_t ops_index;

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	uint32_t populated_size;         /**< Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
	uint32_t nb_mem_chunks;          /**< Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;
#define MEMPOOL_F_NO_SPREAD 0x0001      /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines. */
#define MEMPOOL_F_SP_PUT 0x0004         /**< Default put is "single-producer". */
#define MEMPOOL_F_SC_GET 0x0008         /**< Default get is "single-consumer". */
#define MEMPOOL_F_POOL_CREATED 0x0010   /**< Internal: pool is created. */
#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name##_objs += n;	\
			mp->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while (0)
#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {	\
		unsigned int __lcore_id = rte_lcore_id();	\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name##_blks += n;	\
			mp->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while (0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0)
#endif

/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to a mempool structure.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}
/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
	return hdr->mp;
}
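
/*
 * Usage sketch (not part of the original header): recover the owning
 * pool from an object obtained with rte_mempool_get(). The pool `mp`
 * is assumed to have been created elsewhere.
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) == 0) {
 *		struct rte_mempool *owner = rte_mempool_from_obj(obj);
 *		RTE_ASSERT(owner == mp);
 *		rte_mempool_put(owner, obj);
 *	}
 */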

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

/**
 * @internal Check and update cookies, or panic (debug mode).
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

/**
 * @internal Check contiguous object blocks and update cookies, or panic
 * (debug mode).
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
					      free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
						free)
#else
#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
					      free) \
	do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/**
 * Prototype for implementation specific data provisioning function.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/**
 * Dequeue a number of contiguous object blocks from the external pool.
 */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
/**
 * Calculate memory size required to store given number of objects.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * Default way to calculate memory size required to store given
 * number of objects.
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * Function to be called for each populated object.
 */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *vaddr, rte_iova_t iova);

/**
 * Function to populate mempool objects using provided memory chunk.
 */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Default way to populate mempool objects using provided memory chunk.
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Get some additional information about a mempool.
 */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
		struct rte_mempool_info *info);

/** Structure defining mempool operations structure */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
	/** Optional callback to calculate required memory size. */
	rte_mempool_calc_mem_size_t calc_mem_size;
	/** Optional callback to populate objects from a memory chunk. */
	rte_mempool_populate_t populate;
	/** Get mempool info. */
	rte_mempool_get_info_t get_info;
	/** Dequeue a number of contiguous object blocks. */
	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */

/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
	uint32_t num_ops;      /**< Number of used ops structs in the table. */
	/** Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}
624 
634 int
635 rte_mempool_ops_alloc(struct rte_mempool *mp);
636 
650 static inline int
651 rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
652  void **obj_table, unsigned n)
653 {
654  struct rte_mempool_ops *ops;
655 
656  ops = rte_mempool_get_ops(mp->ops_index);
657  return ops->dequeue(mp, obj_table, n);
658 }

/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}

/**
 * @internal Wrapper for mempool_ops enqueue callback.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
}

/**
 * @internal Wrapper for mempool_ops get_count callback.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/**
 * @internal Wrapper for mempool_ops calc_mem_size callback.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
	uint32_t obj_num, uint32_t pg_shift,
	size_t *min_chunk_size, size_t *align);

/**
 * @internal Wrapper for mempool_ops populate callback.
 */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
	void *vaddr, rte_iova_t iova, size_t len,
	rte_mempool_populate_obj_cb_t *obj_cb,
	void *obj_cb_arg);

/**
 * Wrapper for mempool_ops get_info callback.
 */
__rte_experimental
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
	struct rte_mempool_info *info);

/**
 * @internal Wrapper for mempool_ops free callback.
 */
void
rte_mempool_ops_free(struct rte_mempool *mp);

/**
 * Set the ops of a mempool.
 *
 * This can only be done on a mempool that is not populated, i.e. just
 * after a call to rte_mempool_create_empty().
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
		void *pool_config);

/**
 * Register mempool operations.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 */
#define MEMPOOL_REGISTER_OPS(ops)					\
	void mp_hdlr_init_##ops(void);					\
	void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
	{								\
		rte_mempool_register_ops(&ops);				\
	}
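
/*
 * Illustrative sketch (not from the original header): registering a
 * custom ops struct at load time. The my_* callbacks are hypothetical
 * user functions matching the typedefs above.
 *
 *	static const struct rte_mempool_ops my_ops = {
 *		.name = "my_pool_ops",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *	};
 *	MEMPOOL_REGISTER_OPS(my_ops);
 *
 * An application can then select these ops with
 * rte_mempool_set_ops_byname(mp, "my_pool_ops", NULL) before the pool
 * is populated.
 */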

/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses rte_memzone_reserve() to allocate memory. The pool
 * contains n elements of elt_size.
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);
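
/*
 * Usage sketch (not part of the original header): create a pool of
 * 2 KB objects with a 256-entry per-lcore cache. A count of a power of
 * two minus one (here 8192 - 1) is the memory-usage optimum for the
 * underlying ring.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("obj_pool", 8192 - 1, 2048, 256, 0,
 *				NULL, NULL, NULL, NULL,
 *				rte_socket_id(), 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create obj_pool\n");
 */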

/**
 * Create an empty mempool
 *
 * The mempool is allocated and initialized, but not populated: no
 * memory is allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool.
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
	unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);

/**
 * Free a mempool
 *
 * Unlink the mempool from the global list, free the memory chunks, and
 * all memory referenced by the mempool. The objects must not be used by
 * other cores as they will be freed.
 */
void
rte_mempool_free(struct rte_mempool *mp);
/**
 * Add virtually and IOVA contiguous memory for objects in the pool
 * at init.
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add virtually contiguous memory for objects in the pool at init.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool. It adds memory allocated using rte_memzone_reserve().
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

/**
 * Add memory from anonymous mapping for objects in the pool at init.
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);
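
/*
 * Usage sketch (not part of the original header): the split-phase
 * equivalent of rte_mempool_create(), which lets the caller pick the
 * ops. "ring_mp_mc" is the default multi-producer/multi-consumer ring
 * handler.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("my_pool", 4096 - 1, 128,
 *				      0, 0, rte_socket_id(), 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create empty mempool\n");
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
 *	    rte_mempool_populate_default(mp) < 0)
 *		rte_mempool_free(mp);
 */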
/**
 * Call a function for each mempool element.
 *
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
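
/*
 * Usage sketch (not part of the original header): counting objects with
 * a user callback. obj_count_cb is a hypothetical name.
 *
 *	static void
 *	obj_count_cb(struct rte_mempool *mp, void *opaque, void *obj,
 *		     unsigned obj_idx)
 *	{
 *		unsigned *count = opaque;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned count = 0;
 *	rte_mempool_obj_iter(mp, obj_count_cb, &count);
 */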

/**
 * Call a function for each mempool memory chunk.
 *
 * @return
 *   Number of memory chunks iterated.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);

/**
 * Dump the status of the mempool to a file.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
/**
 * Create a user-owned mempool cache.
 *
 * This can be used by non-EAL threads to enable caching when they
 * interact with a mempool.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);
/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @return
 *   A pointer to the mempool cache, or NULL if caching is disabled or
 *   if the caller is a non-EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	return &mp->local_cache[lcore_id];
}
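
/*
 * Usage sketch (not part of the original header): a non-EAL thread
 * using an explicit user-owned cache with the generic get/put API
 * declared later in this file.
 *
 *	struct rte_mempool_cache *cache;
 *	void *objs[32];
 *
 *	cache = rte_mempool_cache_create(256, rte_socket_id());
 *	if (cache != NULL) {
 *		if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
 *			... use the 32 objects ...
 *			rte_mempool_generic_put(mp, objs, 32, cache);
 *		}
 *		rte_mempool_cache_flush(cache, mp);
 *		rte_mempool_cache_free(cache);
 *	}
 */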

/**
 * Flush a user-owned mempool cache to the specified mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
			struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}

/**
 * @internal Put several objects back in the mempool; used internally.
 */
static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* Increment stat now; adding to the mempool always succeeds. */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* No cache provided, or the put would overflow the memory
	 * allocated for the cache: go straight to the backing pool.
	 */
	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache algorithm is:
	 * 1. Add the objects to the cache.
	 * 2. If the cache length crosses the flush threshold, flush
	 *    everything above the nominal cache size back to the ring.
	 */

	/* Add elements back into the cache */
	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

	cache->len += n;

	if (cache->len >= cache->flushthresh) {
		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
				cache->len - cache->size);
		cache->len = cache->size;
	}

	return;

ring_enqueue:

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
		rte_panic("cannot put objects in mempool\n");
#else
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}

/**
 * Put several objects back in the mempool.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_generic_put(mp, obj_table, n, cache);
}

/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_generic_put(mp, obj_table, n, cache);
}

/**
 * Put one object back in the mempool.
 */
static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}
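
/*
 * Usage sketch (not part of the original header): bulk get/put is
 * all-or-nothing; a burst is either fully allocated or not at all.
 * rte_mempool_get_bulk() is declared later in this file.
 *
 *	void *burst[32];
 *
 *	if (rte_mempool_get_bulk(mp, burst, 32) == 0) {
 *		... use the 32 objects ...
 *		rte_mempool_put_bulk(mp, burst, 32);
 *	}
 */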

/**
 * @internal Get several objects from the mempool; used internally.
 *
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of ring dequeue function.
 */
static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided or cannot be satisfied from cache */
	if (unlikely(cache == NULL || n >= cache->size))
		goto ring_dequeue;

	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it. */
		/* Number required = objects to fill the cache + the request. */
		uint32_t req = n + (cache->size - cache->len);

		ret = rte_mempool_ops_dequeue_bulk(mp,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the off chance that we are buffer constrained
			 * and cannot allocate cache + n objects, go to the
			 * ring directly. If that fails, we are truly out of
			 * buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n;
	     ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from ring */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}

/**
 * Get several objects from the mempool.
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool.
 *
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	ret = __mempool_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}

/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 *
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}

/**
 * Get one object from the mempool.
 *
 * @return
 *   - 0: Success; one object retrieved.
 *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}
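
/*
 * Usage sketch (not part of the original header): single-object
 * allocation with error handling. On failure nothing is retrieved.
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) < 0)
 *		return -ENOBUFS;
 *	... use obj ...
 *	rte_mempool_put(mp, obj);
 */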

/**
 * Get a contiguous block of objects from the mempool.
 *
 * @return
 *   - 0: Success; blocks taken.
 *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
 *   - -EOPNOTSUPP: The mempool driver does not support block dequeue.
 */
static __rte_always_inline int
__rte_experimental
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
			      void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
		__mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
						      1);
	} else {
		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
	}

	return ret;
}

/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the length of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);

/**
 * Return the number of elements which have been allocated from the
 * mempool.
 */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);
/**
 * Test if the mempool is full (debug purposes only; see
 * rte_mempool_avail_count()).
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return !!(rte_mempool_avail_count(mp) == mp->size);
}

/**
 * Test if the mempool is empty (debug purposes only; see
 * rte_mempool_avail_count()).
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return !!(rte_mempool_avail_count(mp) == 0);
}

/**
 * Return the IO address of elt, which is an element of the pool mp.
 *
 * @return
 *   The IO address of the elt element. If the mempool was created with
 *   MEMPOOL_F_NO_IOVA_CONTIG, the returned value is RTE_BAD_IOVA.
 */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->iova;
}

/**
 * Check the consistency of mempool objects; panics on corruption.
 */
void rte_mempool_audit(struct rte_mempool *mp);

/**
 * Return a pointer to the private data in a mempool structure.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}
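
/*
 * Usage sketch (not part of the original header): reserving and
 * retrieving pool private data. struct my_pool_ctx is hypothetical.
 *
 *	struct my_pool_ctx {
 *		uint64_t counter;
 *	};
 *
 *	mp = rte_mempool_create("ctx_pool", 1024 - 1, 64, 0,
 *				sizeof(struct my_pool_ctx),
 *				NULL, NULL, NULL, NULL,
 *				rte_socket_id(), 0);
 *	if (mp != NULL) {
 *		struct my_pool_ctx *ctx = rte_mempool_get_priv(mp);
 *		ctx->counter = 0;
 *	}
 */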

/**
 * Dump the status of all mempools on the console.
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name.
 *
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);

/**
 * Calculate header, trailer and total size of a mempool object, given
 * the element size and flags.
 *
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

/**
 * Walk the list of all memory pools.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);
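
/*
 * Usage sketch (not part of the original header): dumping every pool in
 * the system via rte_mempool_walk(). dump_one_pool is hypothetical.
 *
 *	static void
 *	dump_one_pool(struct rte_mempool *mp, void *arg)
 *	{
 *		FILE *f = arg;
 *
 *		rte_mempool_dump(f, mp);
 *	}
 *
 *	rte_mempool_walk(dump_one_pool, stdout);
 */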

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */