#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <stdio.h>
#include <stdint.h>
/* ... further libc and EAL includes elided ... */
#include <rte_compat.h>
#include <rte_config.h>

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_STATS
/** A structure that stores the mempool statistics (per-lcore). */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;             /**< Number of puts. */
	uint64_t put_objs;             /**< Number of objects successfully put. */
	uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
	uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
	uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
	uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
	uint64_t get_success_bulk;     /**< Successful allocation number. */
	uint64_t get_success_objs;     /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;        /**< Failed allocation number. */
	uint64_t get_fail_objs;        /**< Objects that failed to be allocated. */
	uint64_t get_success_blks;     /**< Successful allocation number of contiguous blocks. */
	uint64_t get_fail_blks;        /**< Failed allocation number of contiguous blocks. */
};
#endif

/** A structure that stores a per-core object cache. */
struct __rte_cache_aligned rte_mempool_cache {
	uint32_t size;        /**< Size of the cache. */
	uint32_t flushthresh; /**< Threshold before we flush excess elements. */
	uint32_t len;         /**< Current cache count. */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
	/* Fast-path per-cache copies of the most frequently updated statistics. */
	struct {
		uint64_t put_bulk;         /**< Number of puts. */
		uint64_t put_objs;         /**< Number of objects successfully put. */
		uint64_t get_success_bulk; /**< Successful allocation number. */
		uint64_t get_success_objs; /**< Objects successfully allocated. */
	} stats;              /**< Statistics. */
#endif
	/**
	 * Cache objects.
	 *
	 * The array is allocated to twice the nominal cache size to let the
	 * cache overflow in certain cases, avoiding needless emptying.
	 */
	alignas(RTE_CACHE_LINE_SIZE) void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2];
};
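/**
 * Example: using a user-owned cache (an illustrative sketch, not part of
 * the original header). A thread that is not registered as an EAL lcore
 * has no default per-lcore cache, so it can create its own and pass it
 * explicitly to the generic get/put functions below. "mp" is assumed to be
 * an existing, populated mempool handle.
 *
 * @code
 * struct rte_mempool_cache *c = rte_mempool_cache_create(256, SOCKET_ID_ANY);
 * void *objs[32];
 *
 * if (c != NULL && rte_mempool_generic_get(mp, objs, 32, c) == 0) {
 *         // ... use the 32 objects ...
 *         rte_mempool_generic_put(mp, objs, 32, c);
 * }
 * // Return any cached objects to the pool before freeing the cache.
 * rte_mempool_cache_flush(c, mp);
 * rte_mempool_cache_free(c);
 * @endcode
 */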
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/** Maximum length of a mempool name, constrained by the ring name size. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"

#ifndef RTE_MEMPOOL_ALIGN
/** Alignment of elements inside mempool. */
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header structure.
 *
 * Each object stored in a mempool is prefixed by this header; it allows
 * retrieving the owning mempool from an object and iterating on all objects
 * attached to a mempool. When debug is enabled, a cookie is also added in
 * this structure, preventing corruptions and double-frees.
 */
struct rte_mempool_objhdr {
	RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	rte_iova_t iova;                 /**< IO address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/** A list of object headers type. */
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/** Mempool object trailer structure (if debug is enabled). */
struct rte_mempool_objtlr {
	uint64_t cookie;                 /**< Debug cookie. */
};
#endif

extern int rte_mempool_logtype;
#define RTE_LOGTYPE_MEMPOOL rte_mempool_logtype
#define RTE_MEMPOOL_LOG(level, ...) \
	RTE_LOG_LINE(level, MEMPOOL, "" __VA_ARGS__)

/** Callback used to free a memory chunk. */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/** Mempool objects memory header structure. */
struct rte_mempool_memhdr {
	RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the chunk. */
	void *addr;              /**< Virtual address of the chunk. */
	rte_iova_t iova;         /**< IO address of the chunk. */
	size_t len;              /**< Length of the chunk. */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback. */
	void *opaque;            /**< Argument passed to the free callback. */
};

/** A list of memory chunks where objects are stored. */
RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/** The RTE mempool structure. */
struct __rte_cache_aligned rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	/* ... pool_data / pool_config union and other fields elided ... */
	const struct rte_memzone *mz;    /**< Memzone where pool is alloc'd. */
	unsigned int flags;              /**< Flags of the mempool. */
	uint32_t size;                   /**< Max size of the mempool. */
	uint32_t cache_size;             /**< Size of per-lcore default local cache. */
	uint32_t elt_size;               /**< Size of an element. */
	unsigned private_data_size;      /**< Size of private data. */
	int32_t ops_index;               /**< Index of the ops struct in use. */
	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache. */
	/* ... */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool. */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks. */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
	/** Per-lcore statistics; the extra slot is shared by unregistered non-EAL threads. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE + 1];
#endif
};
/** Spreading among memory channels not required. */
#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
/** @deprecated Use RTE_MEMPOOL_F_NO_SPREAD instead. */
#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
/** Do not align objects on cache lines. */
#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
/** @deprecated Use RTE_MEMPOOL_F_NO_CACHE_ALIGN instead. */
#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
/** Default put is "single-producer". */
#define RTE_MEMPOOL_F_SP_PUT 0x0004
/** @deprecated Use RTE_MEMPOOL_F_SP_PUT instead. */
#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
/** Default get is "single-consumer". */
#define RTE_MEMPOOL_F_SC_GET 0x0008
/** @deprecated Use RTE_MEMPOOL_F_SC_GET instead. */
#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
/** Internal: pool is created. */
#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
/** Don't need IOVA contiguous objects. */
#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
/** @deprecated Use RTE_MEMPOOL_F_NO_IOVA_CONTIG instead. */
#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
/** Internal: no object from the pool can be used for device IO (DMA). */
#define RTE_MEMPOOL_F_NON_IO 0x0040

/** Flags a user may pass at creation time (internal flags excluded). */
#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
	| RTE_MEMPOOL_F_NO_CACHE_ALIGN \
	| RTE_MEMPOOL_F_SP_PUT \
	| RTE_MEMPOOL_F_SC_GET \
	| RTE_MEMPOOL_F_NO_IOVA_CONTIG \
	)

/**
 * @internal When stats are enabled, store some statistics.
 * The lcore slot RTE_MAX_LCORE is shared by unregistered non-EAL threads,
 * hence the atomic update for that slot only.
 */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {                          \
		unsigned int __lcore_id = rte_lcore_id();               \
		if (likely(__lcore_id < RTE_MAX_LCORE))                 \
			(mp)->stats[__lcore_id].name += (n);            \
		else                                                    \
			rte_atomic_fetch_add_explicit(&((mp)->stats[RTE_MAX_LCORE].name), \
				(n), rte_memory_order_relaxed);         \
	} while (0)
#else
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif

/**
 * @internal When stats are enabled, store statistics in the mempool cache
 * structure, which is cheap to update from the hot path.
 */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) ((cache)->stats.name += (n))
#else
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) do {} while (0)
#endif

/**
 * Size of the mempool header plus the per-lcore default caches, which are
 * laid out immediately after struct rte_mempool when a cache size was
 * requested at creation time.
 */
#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *rte_mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/** Return a pointer to the mempool owning this object. */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
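/**
 * Example: object layout (an illustrative sketch, not part of the original
 * header). Each object is bracketed by a header and, in debug builds, a
 * cookie-carrying trailer, so corruptions and double-frees can be detected:
 *
 *   | struct rte_mempool_objhdr | object data ... | struct rte_mempool_objtlr |
 *                               ^-- pointer handed to the application
 *
 * @code
 * // Given an object obtained from a pool, recover its owning mempool:
 * struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
 * struct rte_mempool *owner = hdr->mp; // same as rte_mempool_from_obj(obj)
 * @endcode
 */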
/**
 * @internal Check and update cookies, or panic (debug builds only).
 *
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that the cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

/**
 * @internal Check contiguous object blocks and update cookies, or panic.
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
						free)
#else
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/** Allocate the external pool (implementation specific). */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/** Free the opaque private data stored in the mempool. */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/** Enqueue an object into the external pool. */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);
/** Dequeue an object from the external pool. */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/** Dequeue a number of contiguous object blocks from the external pool. */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n);

/** Return the number of available objects in the external pool. */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/** Calculate memory size required to store a given number of objects. */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * @internal Helper to calculate the memory size required to store a given
 * number of objects, assuming that no object is split across pages.
 */
ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
		size_t *min_chunk_size, size_t *align);

/**
 * Default way to calculate the memory size required to store a given
 * number of objects (the helper above with chunk_reserve == 0).
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/** Function to be called for each populated object. */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *vaddr, rte_iova_t iova);

/** Function to populate mempool objects using a provided memory chunk. */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/** Align objects on addresses multiple of total_elt_sz. */
#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001

/**
 * @internal Helper to populate mempool objects using a provided memory
 * chunk: just slice objects one after another.
 */
int rte_mempool_op_populate_helper(struct rte_mempool *mp,
		unsigned int flags, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/** Default way to populate mempool objects using a provided memory chunk. */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/** Structure to hold information about the mempool. */
struct rte_mempool_info {
	/** Number of objects in a contiguous block, 0 if unsupported. */
	unsigned int contig_block_size;
};

/** Get some additional information about a mempool. */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
		struct rte_mempool_info *info);

/** Structure defining mempool operations. */
struct __rte_cache_aligned rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
	/** Optional callback to calculate the required memory size. */
	rte_mempool_calc_mem_size_t calc_mem_size;
	/** Optional callback to populate objects in a memory chunk. */
	rte_mempool_populate_t populate;
	/** Get mempool info. */
	rte_mempool_get_info_t get_info;
	/** Dequeue a number of contiguous object blocks. */
	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
};

#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs. */

/** Structure storing the table of registered ops structs. */
struct __rte_cache_aligned rte_mempool_ops_table {
	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
	uint32_t num_ops;      /**< Number of used ops structs in the table. */
	/** Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
};

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;

/**
 * @internal Get the mempool ops struct from its index.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));
	return &rte_mempool_ops_table.ops[ops_index];
}
/**
 * @internal Wrapper for mempool_ops dequeue callback.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->dequeue(mp, obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
	}
	return ret;
}
/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops enqueue callback.
 */
static __rte_always_inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->enqueue(mp, obj_table, n);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (unlikely(ret < 0))
		RTE_MEMPOOL_LOG(CRIT, "cannot enqueue %u objects to mempool %s",
			n, mp->name);
#endif
	return ret;
}
/**
 * @internal Wrapper for mempool_ops get_count callback.
 */
unsigned rte_mempool_ops_get_count(const struct rte_mempool *mp);
/**
 * @internal Wrapper for mempool_ops calc_mem_size callback.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);
/**
 * @internal Wrapper for mempool_ops populate callback.
 */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/**
 * Register mempool operations.
 *
 * @return >= 0 on success (index into the ops table), < 0 on error.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 */
#define RTE_MEMPOOL_REGISTER_OPS(ops)			\
	RTE_INIT(mp_hdlr_init_##ops)			\
	{						\
		rte_mempool_register_ops(&ops);		\
	}

/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
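/**
 * Example: registering a custom mempool driver (an illustrative sketch; the
 * callback implementations my_alloc, my_free, my_enqueue, my_dequeue and
 * my_get_count are hypothetical). A driver fills a struct rte_mempool_ops
 * and registers it at load time; applications then select it by name before
 * populating the pool.
 *
 * @code
 * static const struct rte_mempool_ops my_ops = {
 *         .name = "my_handler",
 *         .alloc = my_alloc,
 *         .free = my_free,
 *         .enqueue = my_enqueue,
 *         .dequeue = my_dequeue,
 *         .get_count = my_get_count,
 * };
 * RTE_MEMPOOL_REGISTER_OPS(my_ops);
 *
 * // Application side, after rte_mempool_create_empty() and before
 * // rte_mempool_populate_default():
 * //     rte_mempool_set_ops_byname(mp, "my_handler", NULL);
 * @endcode
 */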
/**
 * Get a pointer to the per-lcore default mempool cache,
 * or NULL if the pool has no cache or the thread is not an EAL lcore.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	rte_mempool_trace_default_cache(mp, lcore_id,
		&mp->local_cache[lcore_id]);
	return &mp->local_cache[lcore_id];
}
/**
 * Flush a user-owned mempool cache to the specified mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
			struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;
	rte_mempool_trace_cache_flush(cache, mp);
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}
/**
 * @internal Put several objects back in the mempool; used internally.
 */
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* No cache provided */
	if (unlikely(cache == NULL))
		goto driver_enqueue;

	/* Increment stats now; adding to the mempool always succeeds. */
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);

	/* The request itself is too big for the cache. */
	if (unlikely(n > cache->flushthresh))
		goto driver_enqueue_stats_incremented;

	/*
	 * If the objects cannot be added to the cache without crossing the
	 * flush threshold, flush the cache to the backend first.
	 */
	if (cache->len + n <= cache->flushthresh) {
		cache_objs = &cache->objs[cache->len];
		cache->len += n;
	} else {
		cache_objs = &cache->objs[0];
		rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
		cache->len = n;
	}

	/* Add the objects to the cache. */
	rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);

	return;

driver_enqueue:

	/* Increment stats now; adding to the mempool always succeeds. */
	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);

driver_enqueue_stats_incremented:

	/* Push the objects directly to the backend. */
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
}
/**
 * Put several objects back in the mempool, using the given cache if any.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
	rte_mempool_do_generic_put(mp, obj_table, n, cache);
}

/**
 * Put several objects back in the mempool, using the default per-lcore
 * cache when available.
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned int n)
{
	struct rte_mempool_cache *cache;

	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
	rte_mempool_generic_put(mp, obj_table, n, cache);
}
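/**
 * Example: returning objects to the pool (an illustrative sketch; "mp",
 * "obj", "objs" and "n_objs" are assumed to hold objects previously taken
 * from this pool). Puts cannot fail: objects land in the per-lcore cache
 * first, and the cache is flushed to the backend when it crosses its
 * threshold.
 *
 * @code
 * rte_mempool_put(mp, obj);               // single object
 * rte_mempool_put_bulk(mp, objs, n_objs); // whole burst at once
 * @endcode
 */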
/**
 * @internal Get several objects from the mempool; used internally.
 */
static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	unsigned int remaining;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided */
	if (unlikely(cache == NULL)) {
		remaining = n;
		goto driver_dequeue;
	}

	/* The cache is a stack, so the copy is in reverse order. */
	cache_objs = &cache->objs[cache->len];

	if (__rte_constant(n) && n <= cache->len) {
		/*
		 * The request size is known at build time, and the entire
		 * request can be satisfied from the cache, so let the
		 * compiler unroll the fixed-length copy loop.
		 */
		cache->len -= n;
		for (index = 0; index < n; index++)
			*obj_table++ = *--cache_objs;

		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

		return 0;
	}

	/*
	 * Use the cache as much as we have to, returning hot objects first.
	 * If the request size 'n' is known at build time, the above
	 * comparison ensures that n > cache->len here, so omit RTE_MIN().
	 */
	len = __rte_constant(n) ? cache->len : RTE_MIN(n, cache->len);
	cache->len -= len;
	remaining = n - len;
	for (index = 0; index < len; index++)
		*obj_table++ = *--cache_objs;

	/*
	 * If 'n' is known at build time, the case where the entire request
	 * is satisfied from the cache has already been handled above.
	 */
	if (!__rte_constant(n) && remaining == 0) {
		/* The entire request is satisfied from the cache. */

		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

		return 0;
	}

	/* The dequeue below would overflow the memory allocated for the cache. */
	if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto driver_dequeue;

	/* Fill the cache from the backend; fetch size + remaining objects. */
	ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
			cache->size + remaining);
	if (unlikely(ret < 0)) {
		/*
		 * We are buffer constrained and cannot allocate
		 * size + remaining; do not fill the cache, just satisfy the
		 * remaining part of the request directly from the backend.
		 */
		goto driver_dequeue;
	}

	/* Satisfy the remaining part of the request from the filled cache. */
	cache_objs = &cache->objs[cache->size + remaining];
	for (index = 0; index < remaining; index++)
		*obj_table++ = *--cache_objs;

	cache->len = cache->size;

	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

	return 0;

driver_dequeue:

	/* Get the remaining objects directly from the backend. */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);

	if (ret < 0) {
		if (likely(cache != NULL)) {
			cache->len = n - remaining;
			/*
			 * No further action is required to roll the first
			 * part of the request back into the cache, as the
			 * objects in the cache are intact.
			 */
		}

		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
	} else {
		if (likely(cache != NULL)) {
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
		} else {
			RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
			RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
		}
	}

	return ret;
}
/**
 * Get several objects from the mempool, using the given cache if any.
 *
 * @return
 *   - 0: Success; all n objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;

	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
	return ret;
}

/**
 * Get several objects from the mempool, using the default per-lcore cache
 * when available.
 */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;

	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}
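/**
 * Example: all-or-nothing dequeue (an illustrative sketch; "mp" is assumed
 * to be a populated mempool). rte_mempool_get_bulk() either delivers all n
 * objects and returns 0, or delivers none and returns -ENOENT; there is no
 * partial completion to unwind.
 *
 * @code
 * void *burst[32];
 *
 * if (rte_mempool_get_bulk(mp, burst, 32) < 0) {
 *         // Pool exhausted (-ENOENT): back off or try a smaller burst.
 *         return;
 * }
 * // All 32 pointers in burst[] are valid here.
 * @endcode
 */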
/**
 * Get contiguous blocks of objects from the mempool.
 *
 * If the cache is enabled, consider flushing it first, so that objects
 * are returned to the backend and can be reused in blocks.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
			      void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
							1);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
	}

	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
	return ret;
}
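/**
 * Example: dequeuing contiguous blocks (an illustrative sketch). Only
 * drivers implementing the dequeue_contig_blocks callback support this;
 * the block size is reported through the get_info callback. "mp" is
 * assumed to use such a driver.
 *
 * @code
 * struct rte_mempool_info info;
 * void *first_obj[4];
 *
 * if (rte_mempool_ops_get_info(mp, &info) == 0 &&
 *     info.contig_block_size > 0 &&
 *     rte_mempool_get_contig_blocks(mp, first_obj, 4) == 0) {
 *         // first_obj[i] points at the first object of each block of
 *         // info.contig_block_size physically contiguous objects.
 * }
 * @endcode
 */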
/**
 * @internal Get the page size used for mempool object allocation.
 */
int rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);

/**
 * Mempool event type.
 */
enum rte_mempool_event {
	/** Occurs after a mempool is fully populated. */
	RTE_MEMPOOL_EVENT_READY = 0,
	/** Occurs before the destruction of a mempool begins. */
	RTE_MEMPOOL_EVENT_DESTROY = 1,
};

/**
 * Mempool life cycle event callback.
 */
typedef void (rte_mempool_event_callback)(
		enum rte_mempool_event event,
		struct rte_mempool *mp,
		void *user_data);

/**
 * Register a callback function invoked on mempool life cycle events.
 */
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
				    void *user_data);

/**
 * Unregister a callback added with rte_mempool_event_callback_register().
 */
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
				      void *user_data);
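/**
 * Example: tracking mempool life cycle events (an illustrative sketch; the
 * callback name "on_mempool_event" is hypothetical). The callback fires
 * when a pool becomes fully populated or is about to be destroyed, e.g. to
 * (un)map the pool memory for DMA.
 *
 * @code
 * static void
 * on_mempool_event(enum rte_mempool_event event, struct rte_mempool *mp,
 *                  void *user_data)
 * {
 *         if (event == RTE_MEMPOOL_EVENT_READY) {
 *                 // pool fully populated: e.g. DMA-map its memory
 *         } else if (event == RTE_MEMPOOL_EVENT_DESTROY) {
 *                 // pool going away: undo the above
 *         }
 * }
 *
 * rte_mempool_event_callback_register(on_mempool_event, NULL);
 * // ...
 * rte_mempool_event_callback_unregister(on_mempool_event, NULL);
 * @endcode
 */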
/*
 * Remaining public API, consolidated from the flattened symbol index that
 * trailed the excerpt. Entries duplicating declarations above, and symbols
 * belonging to other headers (rte_lcore_id, rte_memcpy, RTE_PTR_ADD/SUB,
 * __rte_always_inline, __rte_cache_aligned), have been dropped.
 */

/** Constructor callback, called once at mempool creation time. */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/** A memory chunk callback function for mempool. */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/* Creation, destruction, lookup. */
struct rte_mempool *rte_mempool_create(const char *name, unsigned n,
	unsigned elt_size, unsigned cache_size, unsigned private_data_size,
	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
	int socket_id, unsigned flags);
struct rte_mempool *rte_mempool_create_empty(const char *name, unsigned n,
	unsigned elt_size, unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);
void rte_mempool_free(struct rte_mempool *mp);
struct rte_mempool *rte_mempool_lookup(const char *name);

/* Ops selection and info. */
int rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
	void *pool_config);
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
	struct rte_mempool_info *info);

/* Population of an empty mempool with memory. */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);
int rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t len,
	size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
int rte_mempool_populate_default(struct rte_mempool *mp);
int rte_mempool_populate_anon(struct rte_mempool *mp);

/* Iteration, dump, and audit. */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
void rte_mempool_list_dump(FILE *f);
void rte_mempool_audit(struct rte_mempool *mp);
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
	void *arg);

/* User-owned caches. */
struct rte_mempool_cache *rte_mempool_cache_create(uint32_t size,
	int socket_id);
void rte_mempool_cache_free(struct rte_mempool_cache *cache);

/* Single-object convenience wrappers around the bulk functions above. */
static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}

static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}

/* State queries. */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
unsigned int rte_mempool_in_use_count(const struct rte_mempool *mp);

static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == mp->size;
}

static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == 0;
}

/* Object and pool introspection. */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
	const struct rte_mempool_objhdr *hdr =
		(const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
			sizeof(*hdr));
	return hdr->iova;
}

static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp + RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

/* Object size calculation. */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

/* Experimental introspection helpers. */
__rte_experimental
int rte_mempool_get_mem_range(const struct rte_mempool *mp,
	struct rte_mempool_mem_range_info *mem_range);
__rte_experimental
size_t rte_mempool_get_obj_alignment(const struct rte_mempool *mp);
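/**
 * Example: typical mempool life cycle (an illustrative sketch; sizes and
 * the pool name are arbitrary). rte_mempool_create() allocates and
 * populates the pool using the default ring-based driver; objects are then
 * obtained and returned with the get/put family above.
 *
 * @code
 * #define NB_OBJS  8192
 * #define OBJ_SIZE 2048
 * #define CACHE_SZ 256
 *
 * struct rte_mempool *mp = rte_mempool_create("my_pool", NB_OBJS, OBJ_SIZE,
 *         CACHE_SZ, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 * if (mp == NULL)
 *         rte_panic("cannot create mempool\n");
 *
 * void *obj;
 * if (rte_mempool_get(mp, &obj) == 0) {
 *         // ... use obj ...
 *         rte_mempool_put(mp, obj);
 * }
 *
 * // All objects should have been returned before the pool is freed.
 * rte_mempool_free(mp);
 * @endcode
 */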