DPDK 25.07.0-rc1
rte_mempool.h
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright(c) 2016 6WIND S.A.
4 * Copyright(c) 2022 SmartShare Systems
5 */
6
7#ifndef _RTE_MEMPOOL_H_
8#define _RTE_MEMPOOL_H_
9
37#include <stdalign.h>
38#include <stdio.h>
39#include <stdint.h>
40#include <inttypes.h>
41
42#include <rte_compat.h>
43#include <rte_config.h>
44#include <rte_spinlock.h>
45#include <rte_debug.h>
46#include <rte_lcore.h>
47#include <rte_log.h>
48#include <rte_branch_prediction.h>
49#include <rte_ring.h>
50#include <rte_memcpy.h>
51#include <rte_common.h>
52
53#include "rte_mempool_trace_fp.h"
54
55#ifdef __cplusplus
56extern "C" {
57#endif
58
59#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL
60#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL
61#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL
63#ifdef RTE_LIBRTE_MEMPOOL_STATS
70struct __rte_cache_aligned rte_mempool_debug_stats {
71 uint64_t put_bulk;
72 uint64_t put_objs;
73 uint64_t put_common_pool_bulk;
74 uint64_t put_common_pool_objs;
75 uint64_t get_common_pool_bulk;
76 uint64_t get_common_pool_objs;
77 uint64_t get_success_bulk;
78 uint64_t get_success_objs;
79 uint64_t get_fail_bulk;
80 uint64_t get_fail_objs;
81 uint64_t get_success_blks;
82 uint64_t get_fail_blks;
83 RTE_CACHE_GUARD;
84};
85#endif
86
90struct __rte_cache_aligned rte_mempool_cache {
91 uint32_t size;
92 uint32_t flushthresh;
93 uint32_t len;
94#ifdef RTE_LIBRTE_MEMPOOL_STATS
95 uint32_t unused;
96 /*
97 * Alternative location for the most frequently updated mempool statistics (per-lcore),
98 * providing faster update access when using a mempool cache.
99 */
100 struct {
101 uint64_t put_bulk;
102 uint64_t put_objs;
103 uint64_t get_success_bulk;
104 uint64_t get_success_objs;
105 } stats;
106#endif
113 alignas(RTE_CACHE_LINE_SIZE) void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2];
114};
115
119struct rte_mempool_objsz {
120 uint32_t elt_size;
121 uint32_t header_size;
122 uint32_t trailer_size;
123 uint32_t total_size;
125};
126
128#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
129 sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
130#define RTE_MEMPOOL_MZ_PREFIX "MP_"
131
132/* "MP_<name>" */
133#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
134
135#ifndef RTE_MEMPOOL_ALIGN
139#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
140#endif
141
142#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
143
153struct rte_mempool_objhdr {
154 RTE_STAILQ_ENTRY(rte_mempool_objhdr) next;
155 struct rte_mempool *mp;
156 rte_iova_t iova;
157#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
158 uint64_t cookie;
159#endif
160};
161
165RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
166
167#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
168
175struct rte_mempool_objtlr {
176 uint64_t cookie;
177};
178
179#endif
180
184extern int rte_mempool_logtype;
185#define RTE_LOGTYPE_MEMPOOL rte_mempool_logtype
186#define RTE_MEMPOOL_LOG(level, ...) \
187 RTE_LOG_LINE(level, MEMPOOL, "" __VA_ARGS__)
188
192RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
193
197typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
198 void *opaque);
199
206struct rte_mempool_memhdr {
207 RTE_STAILQ_ENTRY(rte_mempool_memhdr) next;
208 struct rte_mempool *mp;
209 void *addr;
210 rte_iova_t iova;
211 size_t len;
212 rte_mempool_memchunk_free_cb_t *free_cb;
213 void *opaque;
214};
215
222struct __rte_cache_aligned rte_mempool_info {
224 unsigned int contig_block_size;
225};
226
230struct __rte_cache_aligned rte_mempool {
231 char name[RTE_MEMPOOL_NAMESIZE];
232 union {
233 void *pool_data;
234 uint64_t pool_id;
235 };
236 void *pool_config;
237 const struct rte_memzone *mz;
238 unsigned int flags;
239 int socket_id;
240 uint32_t size;
241 uint32_t cache_size;
244 uint32_t elt_size;
245 uint32_t header_size;
246 uint32_t trailer_size;
248 unsigned private_data_size;
256 int32_t ops_index;
257
258 struct rte_mempool_cache *local_cache;
260 uint32_t populated_size;
261 struct rte_mempool_objhdr_list elt_list;
262 uint32_t nb_mem_chunks;
263 struct rte_mempool_memhdr_list mem_list;
265#ifdef RTE_LIBRTE_MEMPOOL_STATS
270 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE + 1];
271#endif
272};
273
275#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
280#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
282#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
287#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
289#define RTE_MEMPOOL_F_SP_PUT 0x0004
294#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
296#define RTE_MEMPOOL_F_SC_GET 0x0008
301#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
303#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
305#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
310#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
312#define RTE_MEMPOOL_F_NON_IO 0x0040
313
317#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
318 | RTE_MEMPOOL_F_NO_CACHE_ALIGN \
319 | RTE_MEMPOOL_F_SP_PUT \
320 | RTE_MEMPOOL_F_SC_GET \
321 | RTE_MEMPOOL_F_NO_IOVA_CONTIG \
322 )
323
334#ifdef RTE_LIBRTE_MEMPOOL_STATS
335#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do { \
336 unsigned int __lcore_id = rte_lcore_id(); \
337 if (likely(__lcore_id != LCORE_ID_ANY)) \
338 (mp)->stats[__lcore_id].name += (n); \
339 else \
340 rte_atomic_fetch_add_explicit(&((mp)->stats[RTE_MAX_LCORE].name), \
341 (n), rte_memory_order_relaxed); \
342 } while (0)
343#else
344#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
345#endif
346
357#ifdef RTE_LIBRTE_MEMPOOL_STATS
358#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) ((cache)->stats.name += (n))
359#else
360#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) do {} while (0)
361#endif
362
371#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
372 (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
373 (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
374
375/* return the header of a mempool object (internal) */
376static inline struct rte_mempool_objhdr *
377rte_mempool_get_header(void *obj)
378{
379 return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
380 sizeof(struct rte_mempool_objhdr));
381}
382
392static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
393{
394 struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
395 return hdr->mp;
396}
397
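/*
 * Illustrative sketch, not part of this header: rte_mempool_from_obj()
 * recovers the owning mempool from the object header placed just before
 * the element, so an object can be returned without passing the pool
 * pointer around. The helper name is an example only.
 */
static inline void example_return_to_owner(void *obj)
{
    struct rte_mempool *owner = rte_mempool_from_obj(obj);

    rte_mempool_put(owner, obj);
}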
398/* return the trailer of a mempool object (internal) */
399static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
400{
401 struct rte_mempool *mp = rte_mempool_from_obj(obj);
402 return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
403}
404
419void rte_mempool_check_cookies(const struct rte_mempool *mp,
420 void * const *obj_table_const, unsigned n, int free);
421
422#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
423#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
424 rte_mempool_check_cookies(mp, obj_table_const, n, free)
425#else
426#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
427#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
428
444void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
445 void * const *first_obj_table_const, unsigned int n, int free);
446
447#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
448#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
449 free) \
450 rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
451 free)
452#else
453#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
454 free) \
455 do {} while (0)
456#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
457
458#define RTE_MEMPOOL_OPS_NAMESIZE 32
470typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
471
475typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
476
483typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
484 void * const *obj_table, unsigned int n);
485
492typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
493 void **obj_table, unsigned int n);
494
498typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
499 void **first_obj_table, unsigned int n);
500
504typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
505
529typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
530 uint32_t obj_num, uint32_t pg_shift,
531 size_t *min_chunk_size, size_t *align);
532
568ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
569 uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
570 size_t *min_chunk_size, size_t *align);
571
579ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
580 uint32_t obj_num, uint32_t pg_shift,
581 size_t *min_chunk_size, size_t *align);
582
595typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
596 void *opaque, void *vaddr, rte_iova_t iova);
597
626typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
627 unsigned int max_objs,
628 void *vaddr, rte_iova_t iova, size_t len,
629 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
630
634#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
635
668int rte_mempool_op_populate_helper(struct rte_mempool *mp,
669 unsigned int flags, unsigned int max_objs,
670 void *vaddr, rte_iova_t iova, size_t len,
671 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
672
679int rte_mempool_op_populate_default(struct rte_mempool *mp,
680 unsigned int max_objs,
681 void *vaddr, rte_iova_t iova, size_t len,
682 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
683
687typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
688 struct rte_mempool_info *info);
689
690
692struct __rte_cache_aligned rte_mempool_ops {
693 char name[RTE_MEMPOOL_OPS_NAMESIZE];
694 rte_mempool_alloc_t alloc;
695 rte_mempool_free_t free;
696 rte_mempool_enqueue_t enqueue;
697 rte_mempool_dequeue_t dequeue;
698 rte_mempool_get_count get_count;
703 rte_mempool_calc_mem_size_t calc_mem_size;
708 rte_mempool_populate_t populate;
712 rte_mempool_get_info_t get_info;
716 rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
717};
718
719#define RTE_MEMPOOL_MAX_OPS_IDX 16
730struct __rte_cache_aligned rte_mempool_ops_table {
731 rte_spinlock_t sl;
732 uint32_t num_ops;
736 struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
737};
738
740extern struct rte_mempool_ops_table rte_mempool_ops_table;
741
751static inline struct rte_mempool_ops *
752rte_mempool_get_ops(int ops_index)
753{
754 RTE_ASSERT((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));
755
756 return &rte_mempool_ops_table.ops[ops_index];
757}
758
768int
769rte_mempool_ops_alloc(struct rte_mempool *mp);
770
784static inline int
785rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
786 void **obj_table, unsigned n)
787{
788 struct rte_mempool_ops *ops;
789 int ret;
790
791 rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
792 ops = rte_mempool_get_ops(mp->ops_index);
793 ret = ops->dequeue(mp, obj_table, n);
794 RTE_ASSERT(ret <= 0);
795 if (likely(ret == 0)) {
796 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
797 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
798 }
799 return ret;
800}
801
815static inline int
816rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
817 void **first_obj_table, unsigned int n)
818{
819 struct rte_mempool_ops *ops;
820 int ret;
821
822 ops = rte_mempool_get_ops(mp->ops_index);
823 RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
824 rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
825 ret = ops->dequeue_contig_blocks(mp, first_obj_table, n);
826 RTE_ASSERT(ret <= 0);
827 return ret;
828}
829
843static inline int
844rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
845 unsigned n)
846{
847 struct rte_mempool_ops *ops;
848 int ret;
849
850 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
851 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
852 rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
853 ops = rte_mempool_get_ops(mp->ops_index);
854 ret = ops->enqueue(mp, obj_table, n);
855 RTE_ASSERT(ret <= 0);
856#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
857 if (unlikely(ret < 0))
858 RTE_MEMPOOL_LOG(CRIT, "cannot enqueue %u objects to mempool %s",
859 n, mp->name);
860#endif
861 return ret;
862}
863
872unsigned
873rte_mempool_ops_get_count(const struct rte_mempool *mp);
874
894ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
895 uint32_t obj_num, uint32_t pg_shift,
896 size_t *min_chunk_size, size_t *align);
897
921int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
922 void *vaddr, rte_iova_t iova, size_t len,
923 rte_mempool_populate_obj_cb_t *obj_cb,
924 void *obj_cb_arg);
925
938int rte_mempool_ops_get_info(const struct rte_mempool *mp,
939 struct rte_mempool_info *info);
940
947void
948rte_mempool_ops_free(struct rte_mempool *mp);
949
967int
968rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
969 void *pool_config);
970
981int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
982
988#define RTE_MEMPOOL_REGISTER_OPS(ops) \
989 RTE_INIT(mp_hdlr_init_##ops) \
990 { \
991 rte_mempool_register_ops(&ops); \
992 }
993
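/*
 * Illustrative sketch, not part of this header: a minimal mempool driver
 * registered with RTE_MEMPOOL_REGISTER_OPS. All names are examples, the
 * callback bodies are stubs (a real driver manages backing storage in
 * alloc/free and moves object pointers in enqueue/dequeue), and <errno.h>
 * is assumed for ENOBUFS.
 */
static int example_ops_alloc(struct rte_mempool *mp)
{
    mp->pool_data = NULL;   /* driver-private state would go here */
    return 0;
}

static void example_ops_free(struct rte_mempool *mp)
{
    RTE_SET_USED(mp);
}

static int example_ops_enqueue(struct rte_mempool *mp, void * const *obj_table,
    unsigned int n)
{
    RTE_SET_USED(mp); RTE_SET_USED(obj_table); RTE_SET_USED(n);
    return 0;
}

static int example_ops_dequeue(struct rte_mempool *mp, void **obj_table,
    unsigned int n)
{
    RTE_SET_USED(mp); RTE_SET_USED(obj_table); RTE_SET_USED(n);
    return -ENOBUFS;        /* stub: never has objects to hand out */
}

static unsigned example_ops_get_count(const struct rte_mempool *mp)
{
    RTE_SET_USED(mp);
    return 0;
}

static const struct rte_mempool_ops example_stub_ops = {
    .name = "example_stub",
    .alloc = example_ops_alloc,
    .free = example_ops_free,
    .enqueue = example_ops_enqueue,
    .dequeue = example_ops_dequeue,
    .get_count = example_ops_get_count,
};

/* Runs at constructor time; the driver is later selected on an empty pool
 * with rte_mempool_set_ops_byname(mp, "example_stub", NULL). */
RTE_MEMPOOL_REGISTER_OPS(example_stub_ops);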
999typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
1000 void *opaque, void *obj, unsigned obj_idx);
1001typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */
1002
1008typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
1009 void *opaque, struct rte_mempool_memhdr *memhdr,
1010 unsigned mem_idx);
1011
1018typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
1019
1031void
1032rte_mempool_free(struct rte_mempool *mp);
1033
1112struct rte_mempool *
1113rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
1114 unsigned cache_size, unsigned private_data_size,
1115 rte_mempool_ctor_t *mp_init, void *mp_init_arg,
1116 rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
1117 int socket_id, unsigned int flags)
1118 __rte_malloc __rte_dealloc(rte_mempool_free, 1);
1119
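/*
 * Illustrative sketch, not part of this header: creating a pool of fixed
 * size buffers with a per-lcore cache. The name, element count, element
 * size and cache size are arbitrary example values; an element count of
 * the form 2^q - 1 best fits the default ring backend.
 */
static inline struct rte_mempool *example_create_pool(void)
{
    struct rte_mempool *mp;

    mp = rte_mempool_create("example_pool",
            4095,           /* number of elements */
            2048,           /* element size in bytes */
            256,            /* per-lcore cache size */
            0,              /* no private data area */
            NULL, NULL,     /* no pool constructor */
            NULL, NULL,     /* no per-object constructor */
            rte_socket_id(),
            0);             /* default flags */

    return mp;              /* NULL on failure, rte_errno is set */
}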
1154struct rte_mempool *
1155rte_mempool_create_empty(const char *name, unsigned int n, unsigned int elt_size,
1156 unsigned int cache_size, unsigned int private_data_size,
1157 int socket_id, unsigned int flags)
1158 __rte_malloc __rte_dealloc(rte_mempool_free, 1);
1159
1190int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
1191 rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
1192 void *opaque);
1193
1220int
1221rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
1222 size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
1223 void *opaque);
1224
1238int rte_mempool_populate_default(struct rte_mempool *mp);
1239
1253int rte_mempool_populate_anon(struct rte_mempool *mp);
1254
1270uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
1271 rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
1272
1288uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
1289 rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
1290
1299void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
1300
1315struct rte_mempool_cache *
1316rte_mempool_cache_create(uint32_t size, int socket_id);
1317
1324void
1325rte_mempool_cache_free(struct rte_mempool_cache *cache);
1326
1338static __rte_always_inline struct rte_mempool_cache *
1339rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
1340{
1341 if (unlikely(mp->cache_size == 0))
1342 return NULL;
1343
1344 if (unlikely(lcore_id == LCORE_ID_ANY))
1345 return NULL;
1346
1347 rte_mempool_trace_default_cache(mp, lcore_id,
1348 &mp->local_cache[lcore_id]);
1349 return &mp->local_cache[lcore_id];
1350}
1351
1360static __rte_always_inline void
1361rte_mempool_cache_flush(struct rte_mempool_cache *cache,
1362 struct rte_mempool *mp)
1363{
1364 if (cache == NULL)
1365 cache = rte_mempool_default_cache(mp, rte_lcore_id());
1366 if (cache == NULL || cache->len == 0)
1367 return;
1368 rte_mempool_trace_cache_flush(cache, mp);
1369 rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
1370 cache->len = 0;
1371}
1372
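/*
 * Illustrative sketch, not part of this header: a thread for which
 * rte_mempool_default_cache() returns NULL (an unregistered non-EAL
 * thread) can keep a user-owned cache and pass it explicitly to the
 * generic get/put functions, flushing and freeing it when done.
 */
static inline void example_user_owned_cache(struct rte_mempool *mp)
{
    struct rte_mempool_cache *cache;
    void *objs[32];

    cache = rte_mempool_cache_create(64, rte_socket_id());
    if (cache == NULL)
        return;

    if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
        /* ... use the 32 objects ... */
        rte_mempool_generic_put(mp, objs, 32, cache);
    }

    /* Return any objects still held by the cache, then release it. */
    rte_mempool_cache_flush(cache, mp);
    rte_mempool_cache_free(cache);
}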
1385static __rte_always_inline void
1386rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
1387 unsigned int n, struct rte_mempool_cache *cache)
1388{
1389 void **cache_objs;
1390
1391 /* No cache provided? */
1392 if (unlikely(cache == NULL))
1393 goto driver_enqueue;
1394
1395 /* Increment stats now; adding to the mempool always succeeds. */
1396 RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
1397 RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
1398
1399 __rte_assume(cache->flushthresh <= RTE_MEMPOOL_CACHE_MAX_SIZE * 2);
1400 __rte_assume(cache->len <= RTE_MEMPOOL_CACHE_MAX_SIZE * 2);
1401 __rte_assume(cache->len <= cache->flushthresh);
1402 if (likely(cache->len + n <= cache->flushthresh)) {
1403 /* Sufficient room in the cache for the objects. */
1404 cache_objs = &cache->objs[cache->len];
1405 cache->len += n;
1406 } else if (n <= cache->flushthresh) {
1407 /*
1408 * The cache is big enough for the objects, but - as detected by
1409 * the comparison above - has insufficient room for them.
1410 * Flush the cache to make room for the objects.
1411 */
1412 cache_objs = &cache->objs[0];
1413 rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
1414 cache->len = n;
1415 } else {
1416 /* The request itself is too big for the cache. */
1417 goto driver_enqueue_stats_incremented;
1418 }
1419
1420 /* Add the objects to the cache. */
1421 rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);
1422
1423 return;
1424
1425driver_enqueue:
1426
1427 /* Increment stats now; adding to the mempool always succeeds. */
1428 RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
1429 RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
1430
1431driver_enqueue_stats_incremented:
1432
1433 /* push objects to the backend */
1434 rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
1435}
1436
1437
1450static __rte_always_inline void
1451rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
1452 unsigned int n, struct rte_mempool_cache *cache)
1453{
1454 rte_mempool_trace_generic_put(mp, obj_table, n, cache);
1455 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
1456 rte_mempool_do_generic_put(mp, obj_table, n, cache);
1457}
1458
1473static __rte_always_inline void
1474rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
1475 unsigned int n)
1476{
1477 struct rte_mempool_cache *cache;
1478 cache = rte_mempool_default_cache(mp, rte_lcore_id());
1479 rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
1480 rte_mempool_generic_put(mp, obj_table, n, cache);
1481}
1482
1495static __rte_always_inline void
1496rte_mempool_put(struct rte_mempool *mp, void *obj)
1497{
1498 rte_mempool_put_bulk(mp, &obj, 1);
1499}
1500
1515static __rte_always_inline int
1516rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
1517 unsigned int n, struct rte_mempool_cache *cache)
1518{
1519 int ret;
1520 unsigned int remaining;
1521 uint32_t index, len;
1522 void **cache_objs;
1523
1524 /* No cache provided? */
1525 if (unlikely(cache == NULL)) {
1526 remaining = n;
1527 goto driver_dequeue;
1528 }
1529
1530 /* The cache is a stack, so copy will be in reverse order. */
1531 cache_objs = &cache->objs[cache->len];
1532
1533 __rte_assume(cache->len <= RTE_MEMPOOL_CACHE_MAX_SIZE * 2);
1534 if (__rte_constant(n) && n <= cache->len) {
1535 /*
1536 * The request size is known at build time, and
1537 * the entire request can be satisfied from the cache,
1538 * so let the compiler unroll the fixed length copy loop.
1539 */
1540 cache->len -= n;
1541 for (index = 0; index < n; index++)
1542 *obj_table++ = *--cache_objs;
1543
1544 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1545 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1546
1547 return 0;
1548 }
1549
1550 /*
1551 * Satisfy as much of the request as possible from the cache, returning hot objects first.
1552 * If the request size 'n' is known at build time, the above comparison
1553 * ensures that n > cache->len here, so omit RTE_MIN().
1554 */
1555 len = __rte_constant(n) ? cache->len : RTE_MIN(n, cache->len);
1556 cache->len -= len;
1557 remaining = n - len;
1558 for (index = 0; index < len; index++)
1559 *obj_table++ = *--cache_objs;
1560
1561 /*
1562 * If the request size 'n' is known at build time, the case
1563 * where the entire request can be satisfied from the cache
1564 * has already been handled above, so omit handling it here.
1565 */
1566 if (!__rte_constant(n) && likely(remaining == 0)) {
1567 /* The entire request is satisfied from the cache. */
1568
1569 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1570 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1571
1572 return 0;
1573 }
1574
1575 /* Dequeue below would overflow mem allocated for cache? */
1576 if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
1577 goto driver_dequeue;
1578
1579 /* Fill the cache from the backend; fetch size + remaining objects. */
1580 ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
1581 cache->size + remaining);
1582 if (unlikely(ret < 0)) {
1583 /*
1584 * The backend is short of objects and cannot supply that many.
1585 * Do not fill the cache, just satisfy the remaining part of
1586 * the request directly from the backend.
1587 */
1588 goto driver_dequeue;
1589 }
1590
1591 /* Satisfy the remaining part of the request from the filled cache. */
1592 __rte_assume(cache->size <= RTE_MEMPOOL_CACHE_MAX_SIZE);
1593 __rte_assume(remaining <= RTE_MEMPOOL_CACHE_MAX_SIZE);
1594 cache_objs = &cache->objs[cache->size + remaining];
1595 for (index = 0; index < remaining; index++)
1596 *obj_table++ = *--cache_objs;
1597
1598 cache->len = cache->size;
1599
1600 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1601 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1602
1603 return 0;
1604
1605driver_dequeue:
1606
1607 /* Get remaining objects directly from the backend. */
1608 ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);
1609
1610 if (unlikely(ret < 0)) {
1611 if (likely(cache != NULL)) {
1612 cache->len = n - remaining;
1613 /*
1614 * No further action is required to roll the first part
1615 * of the request back into the cache, as objects in
1616 * the cache are intact.
1617 */
1618 }
1619
1620 RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1621 RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
1622 } else {
1623 if (likely(cache != NULL)) {
1624 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
1625 RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
1626 } else {
1627 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1628 RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
1629 }
1630 __rte_assume(ret == 0);
1631 }
1632
1633 return ret;
1634}
1635
1656static __rte_always_inline int
1657rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
1658 unsigned int n, struct rte_mempool_cache *cache)
1659{
1660 int ret;
1661 ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
1662 if (likely(ret == 0))
1663 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
1664 rte_mempool_trace_generic_get(mp, obj_table, n, cache);
1665 return ret;
1666}
1667
1690static __rte_always_inline int
1691rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
1692{
1693 struct rte_mempool_cache *cache;
1694 cache = rte_mempool_default_cache(mp, rte_lcore_id());
1695 rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
1696 return rte_mempool_generic_get(mp, obj_table, n, cache);
1697}
1698
1719static __rte_always_inline int
1720rte_mempool_get(struct rte_mempool *mp, void **obj_p)
1721{
1722 return rte_mempool_get_bulk(mp, obj_p, 1);
1723}
1724
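/*
 * Illustrative sketch, not part of this header: bulk get is all-or-nothing,
 * so on failure nothing was taken and nothing has to be returned. Names
 * and the burst size are example values.
 */
static inline int example_process_burst(struct rte_mempool *mp)
{
    void *burst[32];

    if (rte_mempool_get_bulk(mp, burst, 32) < 0)
        return -1;          /* pool and cache could not supply 32 objects */

    /* ... work with burst[0..31] ... */

    rte_mempool_put_bulk(mp, burst, 32);
    return 0;
}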
1746static __rte_always_inline int
1747rte_mempool_get_contig_blocks(struct rte_mempool *mp,
1748 void **first_obj_table, unsigned int n)
1749{
1750 int ret;
1751
1752 ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
1753 if (likely(ret == 0)) {
1754 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1755 RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
1756 RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
1757 1);
1758 } else {
1759 RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1760 RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
1761 }
1762
1763 rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
1764 return ret;
1765}
1766
1779unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
1780
1793unsigned int
1794rte_mempool_in_use_count(const struct rte_mempool *mp);
1795
1809static inline int
1810rte_mempool_full(const struct rte_mempool *mp)
1811{
1812 return rte_mempool_avail_count(mp) == mp->size;
1813}
1814
1828static inline int
1829rte_mempool_empty(const struct rte_mempool *mp)
1830{
1831 return rte_mempool_avail_count(mp) == 0;
1832}
1833
1844static inline rte_iova_t
1845rte_mempool_virt2iova(const void *elt)
1846{
1847 const struct rte_mempool_objhdr *hdr;
1848 hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
1849 sizeof(*hdr));
1850 return hdr->iova;
1851}
1852
1863void rte_mempool_audit(struct rte_mempool *mp);
1864
1873static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
1874{
1875 return (char *)mp +
1876 RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
1877}
1878
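/*
 * Illustrative sketch, not part of this header: the private data area
 * requested with private_data_size at creation time sits right after the
 * mempool header and is reached with rte_mempool_get_priv(). The struct
 * below is an example layout, not a DPDK type, and is valid only if the
 * pool was created with private_data_size >= sizeof(struct example_pool_priv).
 */
struct example_pool_priv {
    uint32_t magic;
    uint32_t user_flags;
};

static inline void example_init_priv(struct rte_mempool *mp)
{
    struct example_pool_priv *priv = rte_mempool_get_priv(mp);

    priv->magic = 0x4d454d50;   /* "MEMP" */
    priv->user_flags = 0;
}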
1885void rte_mempool_list_dump(FILE *f);
1886
1898struct rte_mempool *rte_mempool_lookup(const char *name);
1899
1917uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
1918 struct rte_mempool_objsz *sz);
1919
1928void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
1929 void *arg);
1930
1935struct rte_mempool_mem_range_info {
1937 void *start;
1939 size_t length;
1941 bool is_contiguous;
1942};
1943
1959__rte_experimental
1960int
1961rte_mempool_get_mem_range(const struct rte_mempool *mp,
1962 struct rte_mempool_mem_range_info *mem_range);
1963
1976__rte_experimental
1977size_t
1978rte_mempool_get_obj_alignment(const struct rte_mempool *mp);
1979
1984int
1985rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
1986
1991enum rte_mempool_event {
1993 RTE_MEMPOOL_EVENT_READY = 0,
1995 RTE_MEMPOOL_EVENT_DESTROY = 1,
1996};
1997
2007typedef void (rte_mempool_event_callback)(
2008 enum rte_mempool_event event,
2009 struct rte_mempool *mp,
2010 void *user_data);
2011
2028__rte_internal
2029int
2030rte_mempool_event_callback_register(rte_mempool_event_callback *func,
2031 void *user_data);
2032
2046__rte_internal
2047int
2048rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
2049 void *user_data);
2050
2051#ifdef __cplusplus
2052}
2053#endif
2054
2055#endif /* _RTE_MEMPOOL_H_ */