DPDK 25.03.0-rc0
rte_mempool.h
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright(c) 2016 6WIND S.A.
4 * Copyright(c) 2022 SmartShare Systems
5 */
6
7#ifndef _RTE_MEMPOOL_H_
8#define _RTE_MEMPOOL_H_
9
37#include <stdalign.h>
38#include <stdio.h>
39#include <stdint.h>
40#include <inttypes.h>
41
42#include <rte_compat.h>
43#include <rte_config.h>
44#include <rte_spinlock.h>
45#include <rte_debug.h>
46#include <rte_lcore.h>
47#include <rte_log.h>
49#include <rte_ring.h>
50#include <rte_memcpy.h>
51#include <rte_common.h>
52
54
55#ifdef __cplusplus
56extern "C" {
57#endif
58
59#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL
60#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL
61#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL
63#ifdef RTE_LIBRTE_MEMPOOL_STATS
70struct __rte_cache_aligned rte_mempool_debug_stats {
71 uint64_t put_bulk;
72 uint64_t put_objs;
73 uint64_t put_common_pool_bulk;
74 uint64_t put_common_pool_objs;
75 uint64_t get_common_pool_bulk;
76 uint64_t get_common_pool_objs;
77 uint64_t get_success_bulk;
78 uint64_t get_success_objs;
79 uint64_t get_fail_bulk;
80 uint64_t get_fail_objs;
81 uint64_t get_success_blks;
82 uint64_t get_fail_blks;
84};
85#endif
86
91 uint32_t size;
92 uint32_t flushthresh;
93 uint32_t len;
94#ifdef RTE_LIBRTE_MEMPOOL_STATS
95 uint32_t unused;
96 /*
97 * Alternative location for the most frequently updated mempool statistics (per-lcore),
98 * providing faster update access when using a mempool cache.
99 */
100 struct {
101 uint64_t put_bulk;
102 uint64_t put_objs;
103 uint64_t get_success_bulk;
104 uint64_t get_success_objs;
105 } stats;
106#endif
113 alignas(RTE_CACHE_LINE_SIZE) void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2];
114};
115
120 uint32_t elt_size;
121 uint32_t header_size;
122 uint32_t trailer_size;
123 uint32_t total_size;
125};
126
128#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
129 sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
130#define RTE_MEMPOOL_MZ_PREFIX "MP_"
131
132/* "MP_<name>" */
133#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
134
135#ifndef RTE_MEMPOOL_ALIGN
139#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
140#endif
141
142#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
143
155 struct rte_mempool *mp;
157#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
158 uint64_t cookie;
159#endif
160};
161
165RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
166
167#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
168
175struct rte_mempool_objtlr {
176 uint64_t cookie;
177};
178
179#endif
180
184extern int rte_mempool_logtype;
185#define RTE_LOGTYPE_MEMPOOL rte_mempool_logtype
186#define RTE_MEMPOOL_LOG(level, ...) \
187 RTE_LOG_LINE(level, MEMPOOL, "" __VA_ARGS__)
188
192RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
193
197typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
198 void *opaque);
199
208 struct rte_mempool *mp;
209 void *addr;
211 size_t len;
212 rte_mempool_memchunk_free_cb_t *free_cb;
213 void *opaque;
214};
215
224 unsigned int contig_block_size;
225};
226
231 char name[RTE_MEMPOOL_NAMESIZE];
232 union {
233 void *pool_data;
234 uint64_t pool_id;
235 };
236 void *pool_config;
237 const struct rte_memzone *mz;
238 unsigned int flags;
240 uint32_t size;
241 uint32_t cache_size;
244 uint32_t elt_size;
245 uint32_t header_size;
246 uint32_t trailer_size;
248 unsigned private_data_size;
256 int32_t ops_index;
257
258 struct rte_mempool_cache *local_cache;
260 uint32_t populated_size;
261 struct rte_mempool_objhdr_list elt_list;
262 uint32_t nb_mem_chunks;
263 struct rte_mempool_memhdr_list mem_list;
265#ifdef RTE_LIBRTE_MEMPOOL_STATS
270 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE + 1];
271#endif
272};
273
275#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
280#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
282#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
287#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
289#define RTE_MEMPOOL_F_SP_PUT 0x0004
294#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
296#define RTE_MEMPOOL_F_SC_GET 0x0008
301#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
303#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
305#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
310#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
312#define RTE_MEMPOOL_F_NON_IO 0x0040
313
317#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
318 | RTE_MEMPOOL_F_NO_CACHE_ALIGN \
319 | RTE_MEMPOOL_F_SP_PUT \
320 | RTE_MEMPOOL_F_SC_GET \
321 | RTE_MEMPOOL_F_NO_IOVA_CONTIG \
322 )
323
334#ifdef RTE_LIBRTE_MEMPOOL_STATS
335#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do { \
336 unsigned int __lcore_id = rte_lcore_id(); \
337 if (likely(__lcore_id < RTE_MAX_LCORE)) \
338 (mp)->stats[__lcore_id].name += (n); \
339 else \
340 rte_atomic_fetch_add_explicit(&((mp)->stats[RTE_MAX_LCORE].name), \
341 (n), rte_memory_order_relaxed); \
342 } while (0)
343#else
344#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
345#endif
346
357#ifdef RTE_LIBRTE_MEMPOOL_STATS
358#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) ((cache)->stats.name += (n))
359#else
360#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) do {} while (0)
361#endif
362
371#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
372 (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
373 (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
374
375/* return the header of a mempool object (internal) */
376static inline struct rte_mempool_objhdr *
377rte_mempool_get_header(void *obj)
378{
379 return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
380 sizeof(struct rte_mempool_objhdr));
381}
382
392static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
393{
394 struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
395 return hdr->mp;
396}
397
398/* return the trailer of a mempool object (internal) */
399static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
400{
401 struct rte_mempool *mp = rte_mempool_from_obj(obj);
402 return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
403}
404
419void rte_mempool_check_cookies(const struct rte_mempool *mp,
420 void * const *obj_table_const, unsigned n, int free);
421
422#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
423#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
424 rte_mempool_check_cookies(mp, obj_table_const, n, free)
425#else
426#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
427#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
428
444void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
445 void * const *first_obj_table_const, unsigned int n, int free);
446
447#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
448#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
449 free) \
450 rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
451 free)
452#else
453#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
454 free) \
455 do {} while (0)
456#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
457
458#define RTE_MEMPOOL_OPS_NAMESIZE 32
470typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
471
475typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
476
483typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
484 void * const *obj_table, unsigned int n);
485
492typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
493 void **obj_table, unsigned int n);
494
498typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
499 void **first_obj_table, unsigned int n);
500
504typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
505
529typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
530 uint32_t obj_num, uint32_t pg_shift,
531 size_t *min_chunk_size, size_t *align);
532
568ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
569 uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
570 size_t *min_chunk_size, size_t *align);
571
579ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
580 uint32_t obj_num, uint32_t pg_shift,
581 size_t *min_chunk_size, size_t *align);
582
595typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
596 void *opaque, void *vaddr, rte_iova_t iova);
597
626typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
627 unsigned int max_objs,
628 void *vaddr, rte_iova_t iova, size_t len,
629 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
630
634#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
635
668int rte_mempool_op_populate_helper(struct rte_mempool *mp,
669 unsigned int flags, unsigned int max_objs,
670 void *vaddr, rte_iova_t iova, size_t len,
671 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
672
679int rte_mempool_op_populate_default(struct rte_mempool *mp,
680 unsigned int max_objs,
681 void *vaddr, rte_iova_t iova, size_t len,
682 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
683
687typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
688 struct rte_mempool_info *info);
689
690
693 char name[RTE_MEMPOOL_OPS_NAMESIZE];
694 rte_mempool_alloc_t alloc;
695 rte_mempool_free_t free;
696 rte_mempool_enqueue_t enqueue;
697 rte_mempool_dequeue_t dequeue;
698 rte_mempool_get_count get_count;
703 rte_mempool_calc_mem_size_t calc_mem_size;
708 rte_mempool_populate_t populate;
712 rte_mempool_get_info_t get_info;
716 rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
717};
718
719#define RTE_MEMPOOL_MAX_OPS_IDX 16
731 rte_spinlock_t sl;
732 uint32_t num_ops;
736 struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
737};
738
740extern struct rte_mempool_ops_table rte_mempool_ops_table;
741
751static inline struct rte_mempool_ops *
752rte_mempool_get_ops(int ops_index)
753{
754 RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));
755
756 return &rte_mempool_ops_table.ops[ops_index];
757}
758
768int
769rte_mempool_ops_alloc(struct rte_mempool *mp);
770
784static inline int
785rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
786 void **obj_table, unsigned n)
787{
788 struct rte_mempool_ops *ops;
789 int ret;
790
791 rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
792 ops = rte_mempool_get_ops(mp->ops_index);
793 ret = ops->dequeue(mp, obj_table, n);
794 if (ret == 0) {
795 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
796 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
797 }
798 return ret;
799}
800
814static inline int
815rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
816 void **first_obj_table, unsigned int n)
817{
818 struct rte_mempool_ops *ops;
819
820 ops = rte_mempool_get_ops(mp->ops_index);
821 RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
822 rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
823 return ops->dequeue_contig_blocks(mp, first_obj_table, n);
824}
825
839static inline int
840rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
841 unsigned n)
842{
843 struct rte_mempool_ops *ops;
844 int ret;
845
846 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
847 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
848 rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
849 ops = rte_mempool_get_ops(mp->ops_index);
850 ret = ops->enqueue(mp, obj_table, n);
851#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
852 if (unlikely(ret < 0))
853 RTE_MEMPOOL_LOG(CRIT, "cannot enqueue %u objects to mempool %s",
854 n, mp->name);
855#endif
856 return ret;
857}
858
867unsigned
868rte_mempool_ops_get_count(const struct rte_mempool *mp);
869
889ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
890 uint32_t obj_num, uint32_t pg_shift,
891 size_t *min_chunk_size, size_t *align);
892
916int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
917 void *vaddr, rte_iova_t iova, size_t len,
918 rte_mempool_populate_obj_cb_t *obj_cb,
919 void *obj_cb_arg);
920
933int rte_mempool_ops_get_info(const struct rte_mempool *mp,
934 struct rte_mempool_info *info);
935
942void
943rte_mempool_ops_free(struct rte_mempool *mp);
944
962int
963rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
964 void *pool_config);
965
976int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
977
983#define RTE_MEMPOOL_REGISTER_OPS(ops) \
984 RTE_INIT(mp_hdlr_init_##ops) \
985 { \
986 rte_mempool_register_ops(&ops); \
987 }
988
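RTE_MEMPOOL_REGISTER_OPS() above hooks rte_mempool_register_ops() into a constructor so a backend is available before main() runs. The following sketch is not part of this header and only illustrates how a custom backend could be wired in: the names my_lifo, my_pool and my_ops_* are invented for the example, the backend is a plain spinlock-protected LIFO, and real concerns such as multi-process support, NUMA-aware allocation and contiguous-block support are ignored.

#include <errno.h>
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>

/* Hypothetical backend state, stored in mp->pool_data by the alloc callback. */
struct my_pool {
	rte_spinlock_t lock;
	uint32_t top;
	void *objs[];	/* one slot per object; capacity is mp->size */
};

static int
my_ops_alloc(struct rte_mempool *mp)
{
	struct my_pool *p;

	p = rte_zmalloc(NULL, sizeof(*p) + sizeof(void *) * mp->size, 0);
	if (p == NULL)
		return -ENOMEM;
	rte_spinlock_init(&p->lock);
	mp->pool_data = p;
	return 0;
}

static void
my_ops_free(struct rte_mempool *mp)
{
	rte_free(mp->pool_data);
}

static int
my_ops_enqueue(struct rte_mempool *mp, void * const *obj_table, unsigned int n)
{
	struct my_pool *p = mp->pool_data;
	unsigned int i;

	rte_spinlock_lock(&p->lock);
	/* Cannot overflow: the pool never holds more than mp->size objects. */
	for (i = 0; i < n; i++)
		p->objs[p->top++] = obj_table[i];
	rte_spinlock_unlock(&p->lock);
	return 0;
}

static int
my_ops_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct my_pool *p = mp->pool_data;
	unsigned int i;
	int ret = 0;

	rte_spinlock_lock(&p->lock);
	if (p->top < n) {
		ret = -ENOBUFS;	/* dequeue is all-or-nothing */
	} else {
		for (i = 0; i < n; i++)
			obj_table[i] = p->objs[--p->top];
	}
	rte_spinlock_unlock(&p->lock);
	return ret;
}

static unsigned
my_ops_get_count(const struct rte_mempool *mp)
{
	const struct my_pool *p = mp->pool_data;

	return p->top;
}

static const struct rte_mempool_ops my_lifo_ops = {
	.name = "my_lifo",
	.alloc = my_ops_alloc,
	.free = my_ops_free,
	.enqueue = my_ops_enqueue,
	.dequeue = my_ops_dequeue,
	.get_count = my_ops_get_count,
};

RTE_MEMPOOL_REGISTER_OPS(my_lifo_ops);

A pool would opt into such a backend with rte_mempool_set_ops_byname(mp, "my_lifo", NULL) before it is populated.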
994typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
995 void *opaque, void *obj, unsigned obj_idx);
996typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */
997
1003typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
1004 void *opaque, struct rte_mempool_memhdr *memhdr,
1005 unsigned mem_idx);
1006
1013typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
1014
1093struct rte_mempool *
1094rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
1095 unsigned cache_size, unsigned private_data_size,
1096 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
1097 rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
1098 int socket_id, unsigned flags);
1099
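For reference, a minimal creation sketch (not part of this header): it assumes rte_eal_init() has already succeeded, and the pool name, sizes and helper name are arbitrary illustration values.

#include <rte_lcore.h>
#include <rte_mempool.h>

static struct rte_mempool *
create_example_pool(void)
{
	/*
	 * 8191 objects of 2048 bytes with a 256-object per-lcore cache.
	 * cache_size must not exceed RTE_MEMPOOL_CACHE_MAX_SIZE (or n / 1.5),
	 * and a pool size of 2^q - 1 suits the default ring backend best.
	 */
	return rte_mempool_create("example_pool", 8191, 2048, 256,
			0,               /* no private data area */
			NULL, NULL,      /* no pool constructor */
			NULL, NULL,      /* no per-object constructor */
			rte_socket_id(), /* allocate on the caller's NUMA socket */
			0);              /* default flags */
}

On failure the call returns NULL with rte_errno set; the pool is released later with rte_mempool_free().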
1134struct rte_mempool *
1135rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
1136 unsigned cache_size, unsigned private_data_size,
1137 int socket_id, unsigned flags);
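rte_mempool_create_empty() is the split form of rte_mempool_create(): the caller chooses the backend and adds memory explicitly. A hedged sketch of that sequence follows; "ring_mp_mc" is the default ring-based ops name, the other values are arbitrary, and error handling is reduced to the minimum.

#include <rte_lcore.h>
#include <rte_mempool.h>

static struct rte_mempool *
create_pool_in_steps(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("staged_pool", 8191, 2048, 256, 0,
			rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	/* The backend must be chosen before any memory is added. */
	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0)
		goto fail;

	/* Allocate and register memory chunks for all 8191 objects. */
	if (rte_mempool_populate_default(mp) < 0)
		goto fail;

	return mp;
fail:
	rte_mempool_free(mp);
	return NULL;
}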
1149void
1150rte_mempool_free(struct rte_mempool *mp);
1151
1182int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
1183 rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
1184 void *opaque);
1185
1212int
1213rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
1214 size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
1215 void *opaque);
1216
1230int rte_mempool_populate_default(struct rte_mempool *mp);
1231
1245int rte_mempool_populate_anon(struct rte_mempool *mp);
1246
1262uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
1263 rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
1264
1280uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
1281 rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
1282
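As an illustration of the callback types above, the sketch below stamps every populated object through rte_mempool_obj_iter(); my_obj_init, stamp_all_objects and the two-word stamp layout are invented for the example and assume elt_size is at least 8 bytes.

#include <stdio.h>
#include <stdint.h>
#include <rte_mempool.h>

/* Matches rte_mempool_obj_cb_t. */
static void
my_obj_init(struct rte_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
{
	uint32_t *words = obj;
	const uint32_t *pattern = opaque;	/* user argument passed through */

	(void)mp;
	words[0] = *pattern;			/* caller-supplied pattern */
	words[1] = obj_idx;			/* object index within the pool */
}

static void
stamp_all_objects(struct rte_mempool *mp)
{
	uint32_t pattern = 0xcafef00d;

	printf("stamped %u objects\n",
	       rte_mempool_obj_iter(mp, my_obj_init, &pattern));
}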
1291void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
1292
1307struct rte_mempool_cache *
1308rte_mempool_cache_create(uint32_t size, int socket_id);
1309
1316void
1317rte_mempool_cache_free(struct rte_mempool_cache *cache);
1318
1330static __rte_always_inline struct rte_mempool_cache *
1331rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
1332{
1333 if (mp->cache_size == 0)
1334 return NULL;
1335
1336 if (lcore_id >= RTE_MAX_LCORE)
1337 return NULL;
1338
1339 rte_mempool_trace_default_cache(mp, lcore_id,
1340 &mp->local_cache[lcore_id]);
1341 return &mp->local_cache[lcore_id];
1342}
1343
1352static __rte_always_inline void
1353rte_mempool_cache_flush(struct rte_mempool_cache *cache,
1354 struct rte_mempool *mp)
1355{
1356 if (cache == NULL)
1357 cache = rte_mempool_default_cache(mp, rte_lcore_id());
1358 if (cache == NULL || cache->len == 0)
1359 return;
1360 rte_mempool_trace_cache_flush(cache, mp);
1361 rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
1362 cache->len = 0;
1363}
1364
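The cache API above can also be driven explicitly, which is useful for threads that are not EAL lcores and therefore have no default cache. A minimal sketch, with arbitrary sizes, an invented helper name, and the assumption that the pool outlives the cache:

#include <rte_lcore.h>
#include <rte_mempool.h>

static int
use_private_cache(struct rte_mempool *mp)
{
	struct rte_mempool_cache *cache;
	void *objs[32];
	int ret;

	cache = rte_mempool_cache_create(256, rte_socket_id());
	if (cache == NULL)
		return -1;

	ret = rte_mempool_generic_get(mp, objs, 32, cache);
	if (ret == 0) {
		/* ... use the 32 objects ... */
		rte_mempool_generic_put(mp, objs, 32, cache);
	}

	/* Return cached objects to the pool before releasing the cache. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	return ret;
}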
1377static __rte_always_inline void
1378rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
1379 unsigned int n, struct rte_mempool_cache *cache)
1380{
1381 void **cache_objs;
1382
1383 /* No cache provided */
1384 if (unlikely(cache == NULL))
1385 goto driver_enqueue;
1386
1387 /* increment stat now, adding in mempool always success */
1388 RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
1389 RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
1390
1391 /* The request itself is too big for the cache */
1392 if (unlikely(n > cache->flushthresh))
1393 goto driver_enqueue_stats_incremented;
1394
1395 /*
1396 * The cache follows the following algorithm:
1397 * 1. If the objects cannot be added to the cache without crossing
1398 * the flush threshold, flush the cache to the backend.
1399 * 2. Add the objects to the cache.
1400 */
1401
1402 if (cache->len + n <= cache->flushthresh) {
1403 cache_objs = &cache->objs[cache->len];
1404 cache->len += n;
1405 } else {
1406 cache_objs = &cache->objs[0];
1407 rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
1408 cache->len = n;
1409 }
1410
1411 /* Add the objects to the cache. */
1412 rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);
1413
1414 return;
1415
1416driver_enqueue:
1417
1418 /* increment stats now; adding to the mempool always succeeds */
1419 RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
1420 RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
1421
1422driver_enqueue_stats_incremented:
1423
1424 /* push objects to the backend */
1425 rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
1426}
1427
1428
1441static __rte_always_inline void
1442rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
1443 unsigned int n, struct rte_mempool_cache *cache)
1444{
1445 rte_mempool_trace_generic_put(mp, obj_table, n, cache);
1446 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
1447 rte_mempool_do_generic_put(mp, obj_table, n, cache);
1448}
1449
1464static __rte_always_inline void
1465rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
1466 unsigned int n)
1467{
1468 struct rte_mempool_cache *cache;
1469 cache = rte_mempool_default_cache(mp, rte_lcore_id());
1470 rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
1471 rte_mempool_generic_put(mp, obj_table, n, cache);
1472}
1473
1486static __rte_always_inline void
1487rte_mempool_put(struct rte_mempool *mp, void *obj)
1488{
1489 rte_mempool_put_bulk(mp, &obj, 1);
1490}
1491
1506static __rte_always_inline int
1507rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
1508 unsigned int n, struct rte_mempool_cache *cache)
1509{
1510 int ret;
1511 unsigned int remaining;
1512 uint32_t index, len;
1513 void **cache_objs;
1514
1515 /* No cache provided */
1516 if (unlikely(cache == NULL)) {
1517 remaining = n;
1518 goto driver_dequeue;
1519 }
1520
1521 /* The cache is a stack, so copy will be in reverse order. */
1522 cache_objs = &cache->objs[cache->len];
1523
1524 if (__rte_constant(n) && n <= cache->len) {
1525 /*
1526 * The request size is known at build time, and
1527 * the entire request can be satisfied from the cache,
1528 * so let the compiler unroll the fixed length copy loop.
1529 */
1530 cache->len -= n;
1531 for (index = 0; index < n; index++)
1532 *obj_table++ = *--cache_objs;
1533
1534 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1535 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1536
1537 return 0;
1538 }
1539
1540 /*
1541 * Use the cache as much as we have to return hot objects first.
1542 * If the request size 'n' is known at build time, the above comparison
1543 * ensures that n > cache->len here, so omit RTE_MIN().
1544 */
1545 len = __rte_constant(n) ? cache->len : RTE_MIN(n, cache->len);
1546 cache->len -= len;
1547 remaining = n - len;
1548 for (index = 0; index < len; index++)
1549 *obj_table++ = *--cache_objs;
1550
1551 /*
1552 * If the request size 'n' is known at build time, the case
1553 * where the entire request can be satisfied from the cache
1554 * has already been handled above, so omit handling it here.
1555 */
1556 if (!__rte_constant(n) && remaining == 0) {
1557 /* The entire request is satisfied from the cache. */
1558
1559 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1560 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1561
1562 return 0;
1563 }
1564
1565 /* if dequeue below would overflow mem allocated for cache */
1566 if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
1567 goto driver_dequeue;
1568
1569 /* Fill the cache from the backend; fetch size + remaining objects. */
1570 ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
1571 cache->size + remaining);
1572 if (unlikely(ret < 0)) {
1573 /*
1574 * We are buffer constrained, and not able to allocate
1575 * cache + remaining.
1576 * Do not fill the cache, just satisfy the remaining part of
1577 * the request directly from the backend.
1578 */
1579 goto driver_dequeue;
1580 }
1581
1582 /* Satisfy the remaining part of the request from the filled cache. */
1583 cache_objs = &cache->objs[cache->size + remaining];
1584 for (index = 0; index < remaining; index++)
1585 *obj_table++ = *--cache_objs;
1586
1587 cache->len = cache->size;
1588
1589 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1590 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1591
1592 return 0;
1593
1594driver_dequeue:
1595
1596 /* Get remaining objects directly from the backend. */
1597 ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);
1598
1599 if (ret < 0) {
1600 if (likely(cache != NULL)) {
1601 cache->len = n - remaining;
1602 /*
1603 * No further action is required to roll the first part
1604 * of the request back into the cache, as objects in
1605 * the cache are intact.
1606 */
1607 }
1608
1609 RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1610 RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
1611 } else {
1612 if (likely(cache != NULL)) {
1613 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1614 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1615 } else {
1616 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1617 RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
1618 }
1619 }
1620
1621 return ret;
1622}
1623
1644static __rte_always_inline int
1645rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
1646 unsigned int n, struct rte_mempool_cache *cache)
1647{
1648 int ret;
1649 ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
1650 if (ret == 0)
1651 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
1652 rte_mempool_trace_generic_get(mp, obj_table, n, cache);
1653 return ret;
1654}
1655
1678static __rte_always_inline int
1679rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
1680{
1681 struct rte_mempool_cache *cache;
1682 cache = rte_mempool_default_cache(mp, rte_lcore_id());
1683 rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
1684 return rte_mempool_generic_get(mp, obj_table, n, cache);
1685}
1686
1707static __rte_always_inline int
1708rte_mempool_get(struct rte_mempool *mp, void **obj_p)
1709{
1710 return rte_mempool_get_bulk(mp, obj_p, 1);
1711}
1712
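Typical single-object and burst usage of the get/put wrappers above might look as follows; the burst size of 16 and the helper name are arbitrary example values.

#include <rte_mempool.h>

static void
get_put_examples(struct rte_mempool *mp)
{
	void *obj;
	void *burst[16];

	/* Single object: returns -ENOENT when the pool is exhausted. */
	if (rte_mempool_get(mp, &obj) == 0) {
		/* ... use obj (mp->elt_size bytes) ... */
		rte_mempool_put(mp, obj);
	}

	/* Bulk: all-or-nothing, either 16 objects or none. */
	if (rte_mempool_get_bulk(mp, burst, 16) == 0)
		rte_mempool_put_bulk(mp, burst, 16);
}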
1734static __rte_always_inline int
1735rte_mempool_get_contig_blocks(struct rte_mempool *mp,
1736 void **first_obj_table, unsigned int n)
1737{
1738 int ret;
1739
1740 ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
1741 if (ret == 0) {
1742 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1743 RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
1744 RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
1745 1);
1746 } else {
1747 RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1748 RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
1749 }
1750
1751 rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
1752 return ret;
1753}
1754
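rte_mempool_get_contig_blocks() only succeeds when the selected ops implement dequeue_contig_blocks (for example the bucket driver). A hedged sketch, with an invented helper name, that first checks for support via rte_mempool_ops_get_info():

#include <errno.h>
#include <rte_mempool.h>

static int
get_two_blocks(struct rte_mempool *mp)
{
	struct rte_mempool_info info;
	void *first_obj[2];

	/* contig_block_size is 0 when the backend has no block support. */
	if (rte_mempool_ops_get_info(mp, &info) != 0 ||
	    info.contig_block_size == 0)
		return -ENOTSUP;

	if (rte_mempool_get_contig_blocks(mp, first_obj, 2) != 0)
		return -ENOBUFS;

	/*
	 * first_obj[0] and first_obj[1] each point to the first of
	 * info.contig_block_size contiguously laid out objects; every
	 * object obtained this way is eventually returned with
	 * rte_mempool_put() like any other object.
	 */
	return 0;
}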
1767unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
1768
1781unsigned int
1782rte_mempool_in_use_count(const struct rte_mempool *mp);
1783
1797static inline int
1798rte_mempool_full(const struct rte_mempool *mp)
1799{
1800 return rte_mempool_avail_count(mp) == mp->size;
1801}
1802
1816static inline int
1817rte_mempool_empty(const struct rte_mempool *mp)
1818{
1819 return rte_mempool_avail_count(mp) == 0;
1820}
1821
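A small sketch combining the counters above; the helper name is invented. Note that while other lcores are actively using the pool these numbers are only a snapshot, and objects sitting in per-lcore caches are counted as available.

#include <stdio.h>
#include <rte_mempool.h>

static void
report_usage(const struct rte_mempool *mp)
{
	/* avail + in_use always adds up to the pool size (mp->size). */
	printf("%s: %u available, %u in use, full=%d, empty=%d\n",
	       mp->name,
	       rte_mempool_avail_count(mp),
	       rte_mempool_in_use_count(mp),
	       rte_mempool_full(mp),
	       rte_mempool_empty(mp));
}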
1832static inline rte_iova_t
1833rte_mempool_virt2iova(const void *elt)
1834{
1835 const struct rte_mempool_objhdr *hdr;
1836 hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
1837 sizeof(*hdr));
1838 return hdr->iova;
1839}
1840
1851void rte_mempool_audit(struct rte_mempool *mp);
1852
1861static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
1862{
1863 return (char *)mp +
1864 RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
1865}
1866
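The private data area is requested through the private_data_size argument of the create functions and then reached with rte_mempool_get_priv(). A sketch with an invented my_pool_ctx structure and helper name:

#include <stdint.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Hypothetical per-pool context kept in the private data area. */
struct my_pool_ctx {
	uint32_t magic;
	uint32_t flags;
};

static struct rte_mempool *
create_pool_with_ctx(void)
{
	struct rte_mempool *mp;
	struct my_pool_ctx *ctx;

	mp = rte_mempool_create("ctx_pool", 8191, 2048, 256,
			sizeof(struct my_pool_ctx),	/* private_data_size */
			NULL, NULL, NULL, NULL, rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	ctx = rte_mempool_get_priv(mp);
	ctx->magic = 0x1234abcd;
	ctx->flags = 0;
	return mp;
}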
1873void rte_mempool_list_dump(FILE *f);
1874
1886struct rte_mempool *rte_mempool_lookup(const char *name);
1887
1905uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
1906 struct rte_mempool_objsz *sz);
1907
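rte_mempool_calc_obj_size() can be called before creating a pool to see how much room each object really takes once the header, trailer and alignment padding are added. A small sketch with an invented helper name:

#include <stdio.h>
#include <stdint.h>
#include <rte_mempool.h>

static void
show_obj_footprint(uint32_t elt_size, uint32_t flags)
{
	struct rte_mempool_objsz sz;
	uint32_t total = rte_mempool_calc_obj_size(elt_size, flags, &sz);

	/* total equals sz.total_size: header + (padded) element + trailer. */
	printf("elt %u -> header %u, elt %u, trailer %u, total %u\n",
	       elt_size, sz.header_size, sz.elt_size, sz.trailer_size, total);
}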
1916void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
1917 void *arg);
1918
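rte_mempool_walk() visits every mempool in the process; combined with rte_mempool_dump() it gives a quick overview of all pools. A short sketch, where dump_one_pool and dump_all_pools are invented helpers:

#include <stdio.h>
#include <rte_mempool.h>

/* Matches the callback signature expected by rte_mempool_walk(). */
static void
dump_one_pool(struct rte_mempool *mp, void *arg)
{
	rte_mempool_dump(arg, mp);
}

static void
dump_all_pools(void)
{
	rte_mempool_walk(dump_one_pool, stdout);
	/* rte_mempool_list_dump(stdout) prints a similar one-call summary. */
}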
1925 void *start;
1927 size_t length;
1930};
1931
1947__rte_experimental
1948int
1949rte_mempool_get_mem_range(const struct rte_mempool *mp,
1950 struct rte_mempool_mem_range_info *mem_range);
1951
1964__rte_experimental
1965size_t
1966rte_mempool_get_obj_alignment(const struct rte_mempool *mp);
1967
1972int
1973rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
1974
1979enum rte_mempool_event {
1981 RTE_MEMPOOL_EVENT_READY = 0,
1983 RTE_MEMPOOL_EVENT_DESTROY = 1,
1984};
1985
1995typedef void (rte_mempool_event_callback)(
1996 enum rte_mempool_event event,
1997 struct rte_mempool *mp,
1998 void *user_data);
1999
2016__rte_internal
2017int
2018rte_mempool_event_callback_register(rte_mempool_event_callback *func,
2019 void *user_data);
2020
2034__rte_internal
2035int
2036rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
2037 void *user_data);
2038
2039#ifdef __cplusplus
2040}
2041#endif
2042
2043#endif /* _RTE_MEMPOOL_H_ */
#define likely(x)
#define unlikely(x)
#define RTE_MIN(a, b)
Definition: rte_common.h:683
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:474
uint64_t rte_iova_t
Definition: rte_common.h:658
#define RTE_PTR_ADD(ptr, x)
Definition: rte_common.h:469
#define RTE_CACHE_GUARD
Definition: rte_common.h:643
#define __rte_cache_aligned
Definition: rte_common.h:627
#define __rte_always_inline
Definition: rte_common.h:413
static unsigned rte_lcore_id(void)
Definition: rte_lcore.h:78
static void * rte_memcpy(void *dst, const void *src, size_t n)
void() rte_mempool_memchunk_free_cb_t(struct rte_mempool_memhdr *memhdr, void *opaque)
Definition: rte_mempool.h:197
int rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name, void *pool_config)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1679
int rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs, void *vaddr, rte_iova_t iova, size_t len, rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
static __rte_always_inline struct rte_mempool_cache * rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
Definition: rte_mempool.h:1331
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, struct rte_mempool_objsz *sz)
void() rte_mempool_obj_cb_t(struct rte_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
Definition: rte_mempool.h:994
struct rte_mempool * rte_mempool_lookup(const char *name)
int(* rte_mempool_enqueue_t)(struct rte_mempool *mp, void *const *obj_table, unsigned int n)
Definition: rte_mempool.h:483
static struct rte_mempool * rte_mempool_from_obj(void *obj)
Definition: rte_mempool.h:392
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1833
int(* rte_mempool_dequeue_t)(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:492
void rte_mempool_free(struct rte_mempool *mp)
rte_mempool_event
Definition: rte_mempool.h:1979
@ RTE_MEMPOOL_EVENT_DESTROY
Definition: rte_mempool.h:1983
@ RTE_MEMPOOL_EVENT_READY
Definition: rte_mempool.h:1981
unsigned(* rte_mempool_get_count)(const struct rte_mempool *mp)
Definition: rte_mempool.h:504
void() rte_mempool_populate_obj_cb_t(struct rte_mempool *mp, void *opaque, void *vaddr, rte_iova_t iova)
Definition: rte_mempool.h:595
int rte_mempool_populate_default(struct rte_mempool *mp)
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp)
static __rte_always_inline int rte_mempool_get_contig_blocks(struct rte_mempool *mp, void **first_obj_table, unsigned int n)
Definition: rte_mempool.h:1735
int(* rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp, void **first_obj_table, unsigned int n)
Definition: rte_mempool.h:498
void(* rte_mempool_free_t)(struct rte_mempool *mp)
Definition: rte_mempool.h:475
static __rte_always_inline void rte_mempool_cache_flush(struct rte_mempool_cache *cache, struct rte_mempool *mp)
Definition: rte_mempool.h:1353
static __rte_always_inline void rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table, unsigned int n)
Definition: rte_mempool.h:1465
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1708
int rte_mempool_register_ops(const struct rte_mempool_ops *ops)
int rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp, uint32_t obj_num, uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
int rte_mempool_populate_anon(struct rte_mempool *mp)
__rte_experimental size_t rte_mempool_get_obj_alignment(const struct rte_mempool *mp)
struct rte_mempool * rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, unsigned cache_size, unsigned private_data_size, int socket_id, unsigned flags)
void rte_mempool_cache_free(struct rte_mempool_cache *cache)
static __rte_always_inline void rte_mempool_generic_put(struct rte_mempool *mp, void *const *obj_table, unsigned int n, struct rte_mempool_cache *cache)
Definition: rte_mempool.h:1442
static int rte_mempool_full(const struct rte_mempool *mp)
Definition: rte_mempool.h:1798
int(* rte_mempool_alloc_t)(struct rte_mempool *mp)
Definition: rte_mempool.h:470
void rte_mempool_dump(FILE *f, struct rte_mempool *mp)
static __rte_always_inline int rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned int n, struct rte_mempool_cache *cache)
Definition: rte_mempool.h:1645
struct rte_mempool_cache * rte_mempool_cache_create(uint32_t size, int socket_id)
void rte_mempool_audit(struct rte_mempool *mp)
void rte_mempool_walk(void(*func)(struct rte_mempool *, void *arg), void *arg)
#define RTE_MEMPOOL_OPS_NAMESIZE
Definition: rte_mempool.h:458
void() rte_mempool_ctor_t(struct rte_mempool *, void *)
Definition: rte_mempool.h:1013
struct rte_mempool * rte_mempool_create(const char *name, unsigned n, unsigned elt_size, unsigned cache_size, unsigned private_data_size, rte_mempool_ctor_t *mp_init, void *mp_init_arg, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags)
void() rte_mempool_mem_cb_t(struct rte_mempool *mp, void *opaque, struct rte_mempool_memhdr *memhdr, unsigned mem_idx)
Definition: rte_mempool.h:1003
ssize_t(* rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp, uint32_t obj_num, uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
Definition: rte_mempool.h:529
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1487
int rte_mempool_ops_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
unsigned int rte_mempool_in_use_count(const struct rte_mempool *mp)
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp, rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
void rte_mempool_list_dump(FILE *f)
__rte_experimental int rte_mempool_get_mem_range(const struct rte_mempool *mp, struct rte_mempool_mem_range_info *mem_range)
#define RTE_MEMPOOL_MAX_OPS_IDX
Definition: rte_mempool.h:719
static int rte_mempool_empty(const struct rte_mempool *mp)
Definition: rte_mempool.h:1817
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp, rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
int(* rte_mempool_populate_t)(struct rte_mempool *mp, unsigned int max_objs, void *vaddr, rte_iova_t iova, size_t len, rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
Definition: rte_mempool.h:626
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1861
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr)
int(* rte_mempool_get_info_t)(const struct rte_mempool *mp, struct rte_mempool_info *info)
Definition: rte_mempool.h:687
uint32_t flushthresh
Definition: rte_mempool.h:92
void * objs[RTE_MEMPOOL_CACHE_MAX_SIZE *2]
Definition: rte_mempool.h:113
unsigned int contig_block_size
Definition: rte_mempool.h:224
RTE_STAILQ_ENTRY(rte_mempool_memhdr) next
struct rte_mempool * mp
Definition: rte_mempool.h:208
rte_mempool_memchunk_free_cb_t * free_cb
Definition: rte_mempool.h:212
struct rte_mempool * mp
Definition: rte_mempool.h:155
RTE_STAILQ_ENTRY(rte_mempool_objhdr) next
uint32_t header_size
Definition: rte_mempool.h:121
uint32_t trailer_size
Definition: rte_mempool.h:122
uint32_t total_size
Definition: rte_mempool.h:123
struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX]
Definition: rte_mempool.h:736
rte_spinlock_t sl
Definition: rte_mempool.h:731
char name[RTE_MEMPOOL_OPS_NAMESIZE]
Definition: rte_mempool.h:693
rte_mempool_alloc_t alloc
Definition: rte_mempool.h:694
rte_mempool_dequeue_t dequeue
Definition: rte_mempool.h:697
rte_mempool_get_info_t get_info
Definition: rte_mempool.h:712
rte_mempool_calc_mem_size_t calc_mem_size
Definition: rte_mempool.h:703
rte_mempool_get_count get_count
Definition: rte_mempool.h:698
rte_mempool_populate_t populate
Definition: rte_mempool.h:708
rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks
Definition: rte_mempool.h:716
rte_mempool_free_t free
Definition: rte_mempool.h:695
rte_mempool_enqueue_t enqueue
Definition: rte_mempool.h:696
uint32_t nb_mem_chunks
Definition: rte_mempool.h:262
const struct rte_memzone * mz
Definition: rte_mempool.h:237
uint32_t populated_size
Definition: rte_mempool.h:260
uint32_t header_size
Definition: rte_mempool.h:245
uint64_t pool_id
Definition: rte_mempool.h:234
int32_t ops_index
Definition: rte_mempool.h:256
void * pool_config
Definition: rte_mempool.h:236
uint32_t trailer_size
Definition: rte_mempool.h:246
char name[RTE_MEMPOOL_NAMESIZE]
Definition: rte_mempool.h:231
uint32_t size
Definition: rte_mempool.h:240
uint32_t cache_size
Definition: rte_mempool.h:241
unsigned int flags
Definition: rte_mempool.h:238
uint32_t elt_size
Definition: rte_mempool.h:244
unsigned private_data_size
Definition: rte_mempool.h:248
struct rte_mempool_cache * local_cache
Definition: rte_mempool.h:258
void * pool_data
Definition: rte_mempool.h:233