#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#define RTE_MEMPOOL_HEADER_COOKIE1  0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
	uint64_t get_success_blks; /**< Successful allocation number of contiguous blocks. */
	uint64_t get_fail_blks;    /**< Failed allocation number of contiguous blocks. */
} __rte_cache_aligned;
#endif
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	uint32_t size;	      /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;	      /**< Current cache count */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
/** Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"
#define MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically continuous memory */
#define MEMPOOL_PG_NUM_DEFAULT	1
#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)
/**
 * Mempool object header structure.
 *
 * Each object stored in mempools is prefixed by this header.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	RTE_STD_C11
	union {
		rte_iova_t iova;         /**< IO address of the object. */
		phys_addr_t physaddr;    /**< deprecated - Physical address of the object. */
	};
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * Mempool object trailer structure (optional in the mempool).
 *
 * In debug mode, each object stored in mempools is suffixed by this
 * trailer; its cookie helps detect memory corruptions.
 */
struct rte_mempool_objtlr {
	uint64_t cookie;                 /**< Debug cookie. */
};
#endif
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;
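/*
 * Observability sketch (illustrative, not part of this header): when the
 * library is built with RTE_LIBRTE_MEMPOOL_DEBUG, the per-lcore counters
 * above are folded into the rte_mempool_dump() report; "mp" is assumed to
 * be a valid pool pointer.
 *
 *	rte_mempool_dump(stdout, mp);	// report includes put/get statistics
 */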
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
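/*
 * Creation sketch (illustrative): with exactly one producer and one
 * consumer thread, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET selects the
 * lighter single-producer/single-consumer ring paths. The name, object
 * count (2^n - 1 is optimal for the ring handler) and sizes below are
 * arbitrary example values.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("example_pool",
 *				4095,		// number of objects
 *				2048,		// object size
 *				32,		// per-lcore cache size
 *				0,		// private data size
 *				NULL, NULL,	// no pool constructor
 *				NULL, NULL,	// no per-object constructor
 *				SOCKET_ID_ANY,
 *				MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
 *	if (mp == NULL)
 *		rte_panic("cannot create mempool\n");
 */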
/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the object-oriented statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name##_objs += n;	\
			mp->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while (0)
#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {	\
		unsigned int __lcore_id = rte_lcore_id();	\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name##_blks += n;	\
			mp->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while (0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0)
#endif
/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);

	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while (0)
#endif
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
					      free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
						free)
#else
#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
					      free) \
	do {} while (0)
#endif
#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
/**
 * Prototype for implementation specific data provisioning function.
 *
 * The function should provide the implementation specific memory for
 * use by the other mempool ops functions in a given mempool ops struct.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);
/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);
/**
 * Dequeue a number of contiguous object blocks from the external pool.
 */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n);
/**
 * Calculate memory size required to store given number of objects.
 *
 * @param[out] min_chunk_size
 *   Location for minimum size of the memory chunk usable to store objects.
 * @param[out] align
 *   Location for required memory chunk alignment.
 * @return
 *   Required memory size aligned at page boundary.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);
/**
 * Default way to calculate memory size required to store given number of
 * objects.
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);
/**
 * @internal Helper function to calculate memory size required to store
 * given number of objects, assuming the memory buffer is aligned at a
 * page boundary.
 */
size_t rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
		uint32_t pg_shift);
/**
 * Function to be called for each populated object.
 */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *vaddr, rte_iova_t iova);

/**
 * Populate memory pool objects using provided memory chunk.
 */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Default way to populate memory pool objects using provided memory chunk.
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/** Structure defining mempool operations structure */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
	/** Optional callback to calculate memory size required to store
	 * a specified number of objects. */
	rte_mempool_calc_mem_size_t calc_mem_size;
	/** Optional callback to populate mempool objects using a provided
	 * memory chunk. */
	rte_mempool_populate_t populate;
	/** Optional callback to dequeue contiguous blocks of objects. */
	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
	uint32_t num_ops;      /**< Number of used ops structs in the table. */
	/** Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;
/**
 * @internal Get the mempool ops struct from its index.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}
/**
 * @internal Wrapper for mempool_ops dequeue callback.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->dequeue(mp, obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops enqueue callback.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops get_count callback.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
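/*
 * Usage sketch (illustrative): the public counters built on top of this
 * callback, assuming "mp" is a valid pool. For a consistent pool, the
 * available and in-use counts sum to the pool size.
 *
 *	unsigned int avail = rte_mempool_avail_count(mp);
 *	unsigned int used = rte_mempool_in_use_count(mp);
 */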
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
				      uint32_t obj_num, uint32_t pg_shift,
				      size_t *min_chunk_size, size_t *align);
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
			     void *vaddr, rte_iova_t iova, size_t len,
			     rte_mempool_populate_obj_cb_t *obj_cb,
			     void *obj_cb_arg);
/**
 * @internal Register mempool operations.
 *
 * @return
 *   - >=0: Success; return the index of the ops struct in the table.
 *   - <0: Error.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 * Note that rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX handlers are registered.
 */
#define MEMPOOL_REGISTER_OPS(ops)					\
	void mp_hdlr_init_##ops(void);					\
	void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
	{								\
		rte_mempool_register_ops(&ops);				\
	}
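/*
 * Registration sketch (illustrative): a minimal external handler wired
 * into the ops table. The my_* callbacks are hypothetical functions
 * matching the typedefs above; only the mandatory fields are set.
 *
 *	static const struct rte_mempool_ops ops_my_handler = {
 *		.name = "my_handler",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_count,
 *	};
 *
 *	MEMPOOL_REGISTER_OPS(ops_my_handler);
 *
 * An application then selects it with rte_mempool_set_ops_byname(mp,
 * "my_handler", NULL) after rte_mempool_create_empty() and before
 * populating the pool.
 */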
/**
 * An object callback function for the mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A mempool constructor callback function.
 *
 * Used by rte_mempool_create() and rte_mempool_create_empty().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
/**
 * @deprecated
 * Create a new mempool named *name* in memory, on a physically contiguous
 * memory area described by vaddr/iova/pg_num/pg_shift.
 */
__rte_deprecated
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags, void *vaddr,
		const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift);
__rte_deprecated
int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
	phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

__rte_deprecated
int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
	const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
__rte_deprecated
int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @return
 *   A pointer to the mempool cache, or NULL if it is disabled or the
 *   caller is not an EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	return &mp->local_cache[lcore_id];
}
/**
 * Flush a user-owned mempool cache to the specified mempool.
 *
 * @param cache
 *   A pointer to the mempool cache, or NULL to flush the calling lcore's
 *   default cache.
 * @param mp
 *   A pointer to the mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
			struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}
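/*
 * Per-thread cache sketch (illustrative), for threads that cannot use a
 * default per-lcore cache; "mp" is assumed to be a valid pool and "objs"
 * is a caller-owned table:
 *
 *	struct rte_mempool_cache *c;
 *	void *objs[32];
 *
 *	c = rte_mempool_cache_create(256, rte_socket_id());
 *	if (c != NULL) {
 *		if (rte_mempool_generic_get(mp, objs, 32, c) == 0)
 *			rte_mempool_generic_put(mp, objs, 32, c);
 *		rte_mempool_cache_flush(c, mp);	// return cached objects
 *		rte_mempool_cache_free(c);
 *	}
 */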
/**
 * @internal Put several objects back in the mempool; used internally.
 */
static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* increment stat now, adding in mempool always success */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* No cache provided or if put would overflow mem allocated for cache */
	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache_objs = &cache->objs[cache->len];

	/* Add the objects to the cache; anything above the flush threshold
	 * is then flushed down to the backing ring. */
	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

	cache->len += n;

	if (cache->len >= cache->flushthresh) {
		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
				cache->len - cache->size);
		cache->len = cache->size;
	}

	return;

ring_enqueue:

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
		rte_panic("cannot put objects in mempool\n");
#else
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
/**
 * Put several objects back in the mempool.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_generic_put(mp, obj_table, n, cache);
}
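/*
 * Usage sketch (illustrative): the thin wrappers rte_mempool_put() and
 * rte_mempool_put_bulk() call into this function with the calling
 * lcore's default cache; "mp" is assumed to be a valid pool.
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) == 0) {
 *		... use the object ...
 *		rte_mempool_put(mp, obj);
 *	}
 */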
/**
 * @internal Get several objects from the mempool; used internally.
 */
static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided or cannot be satisfied from cache */
	if (unlikely(cache == NULL || n >= cache->size))
		goto ring_dequeue;

	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, then fill from it. */
		uint32_t req = n + (cache->size - cache->len);

		ret = rte_mempool_ops_dequeue_bulk(mp,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/* Buffer constrained: fall back to the ring
			 * directly for just n objects. */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n;
	     ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from ring */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}
/**
 * Get several objects from the mempool.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;

	ret = __mempool_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
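/*
 * Bulk sketch (illustrative): rte_mempool_get_bulk() is all-or-nothing,
 * so on success the caller owns exactly "n" objects. The burst size of
 * 32 is an arbitrary example value.
 *
 *	void *table[32];
 *
 *	if (rte_mempool_get_bulk(mp, table, 32) == 0) {
 *		... use the 32 objects ...
 *		rte_mempool_put_bulk(mp, table, 32);
 *	}
 */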
/**
 * Get contiguous blocks of objects from the mempool.
 *
 * @return
 *   - 0: Success; blocks taken.
 *   - <0: Error; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
	void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
		__mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
						      1);
	} else {
		__MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
	}

	return ret;
}
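/*
 * Block-dequeue sketch (illustrative): this only succeeds when the
 * selected handler implements the dequeue_contig_blocks callback (the
 * "bucket" driver does); other handlers fail the call. Each entry of the
 * result table points at the first object of a contiguous block.
 *
 *	void *blocks[4];
 *
 *	if (rte_mempool_get_contig_blocks(mp, blocks, 4) == 0) {
 *		... use the blocks; objects are returned individually,
 *		    e.g. with rte_mempool_put() ...
 *	}
 */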
/**
 * @deprecated
 * Calculate the maximum amount of memory required to store given number
 * of objects.
 */
__rte_deprecated
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
	uint32_t pg_shift, unsigned int flags);
/**
 * @deprecated
 * Calculate how much memory would be actually required with the given
 * memory footprint to store the required number of objects.
 */
__rte_deprecated
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
	size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
	uint32_t pg_shift, unsigned int flags);

#endif /* _RTE_MEMPOOL_H_ */