#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <rte_compat.h>
#include <rte_config.h>

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_STATS
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_common_pool_bulk;	/**< Number of bulk enqueues to the common pool. */
	uint64_t put_common_pool_objs;	/**< Number of objects enqueued to the common pool. */
	uint64_t get_common_pool_bulk;	/**< Number of bulk dequeues from the common pool. */
	uint64_t get_common_pool_objs;	/**< Number of objects dequeued from the common pool. */
	uint64_t get_success_bulk;	/**< Successful allocation number. */
	uint64_t get_success_objs;	/**< Objects successfully allocated. */
	uint64_t get_fail_bulk;		/**< Failed allocation number. */
	uint64_t get_fail_objs;		/**< Objects that failed to be allocated. */
	uint64_t get_success_blks;	/**< Successful allocation number of contiguous blocks. */
	uint64_t get_fail_blks;		/**< Failed allocation number of contiguous blocks. */
};
#ifdef RTE_LIBRTE_MEMPOOL_STATS
	/* Most frequently updated statistics, kept in the per-lcore cache. */
	uint64_t get_success_bulk;	/**< Successful allocation number. */
	uint64_t get_success_objs;	/**< Objects successfully allocated. */
#endif

	/**
	 * Cache objects. The array is sized to twice the configured maximum
	 * so the cache can temporarily overflow instead of being emptied
	 * needlessly.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2] __rte_cache_aligned;
} __rte_cache_aligned;
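/*
 * Example (illustrative sketch, not part of this header): when the library is
 * built with RTE_LIBRTE_MEMPOOL_STATS, the counters above are maintained and
 * can be inspected with rte_mempool_dump(). The pool name "example_pool" is a
 * placeholder; stdout assumes <stdio.h>.
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("example_pool");
 *
 *	if (mp != NULL)
 *		rte_mempool_dump(stdout, mp);	// includes statistics, if enabled
 */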
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#ifndef RTE_MEMPOOL_ALIGN
/** Alignment of elements inside mempool. */
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure.
 *
 * In debug mode, each object stored in mempools is suffixed by this
 * trailer containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	const struct rte_memzone *mz;	 /**< Memzone where pool is alloc'd. */
	unsigned private_data_size;	 /**< Size of private data. */

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache. */

	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool. */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks. */

#ifdef RTE_LIBRTE_MEMPOOL_STATS
	/** Per-lcore statistics.
	 *
	 * Plus one, for unregistered non-EAL threads.
	 */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE + 1];
#endif
}  __rte_cache_aligned;
/** Spreading among memory channels not required. */
#define RTE_MEMPOOL_F_NO_SPREAD		0x0001
/** Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD. To be deprecated. */
#define MEMPOOL_F_NO_SPREAD		RTE_MEMPOOL_F_NO_SPREAD
/** Do not align objects on cache lines. */
#define RTE_MEMPOOL_F_NO_CACHE_ALIGN	0x0002
/** Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN. To be deprecated. */
#define MEMPOOL_F_NO_CACHE_ALIGN	RTE_MEMPOOL_F_NO_CACHE_ALIGN
/** Default put is "single-producer". */
#define RTE_MEMPOOL_F_SP_PUT		0x0004
/** Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT. To be deprecated. */
#define MEMPOOL_F_SP_PUT		RTE_MEMPOOL_F_SP_PUT
/** Default get is "single-consumer". */
#define RTE_MEMPOOL_F_SC_GET		0x0008
/** Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET. To be deprecated. */
#define MEMPOOL_F_SC_GET		RTE_MEMPOOL_F_SC_GET
/** Internal: pool is created. */
#define RTE_MEMPOOL_F_POOL_CREATED	0x0010
/** Don't need IOVA contiguous objects. */
#define RTE_MEMPOOL_F_NO_IOVA_CONTIG	0x0020
/** Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG. To be deprecated. */
#define MEMPOOL_F_NO_IOVA_CONTIG	RTE_MEMPOOL_F_NO_IOVA_CONTIG
/** Internal: no object from the pool can be used for device IO (DMA). */
#define RTE_MEMPOOL_F_NON_IO		0x0040

/** Mempool flags an application may request. */
#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
			| RTE_MEMPOOL_F_NO_CACHE_ALIGN \
			| RTE_MEMPOOL_F_SP_PUT \
			| RTE_MEMPOOL_F_SC_GET \
			| RTE_MEMPOOL_F_NO_IOVA_CONTIG \
			)

/**
 * @internal When stats are enabled, add n to the per-lcore counter "name";
 * unregistered non-EAL threads use the extra RTE_MAX_LCORE slot atomically.
 */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {				\
		unsigned int __lcore_id = rte_lcore_id();		\
		if (likely(__lcore_id < RTE_MAX_LCORE))			\
			(mp)->stats[__lcore_id].name += (n);		\
		else							\
			__atomic_fetch_add(&((mp)->stats[RTE_MAX_LCORE].name), \
					(n), __ATOMIC_RELAXED);		\
	} while (0)
#else
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif

/** @internal When stats are enabled, add n to a counter in the mempool cache. */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) ((cache)->stats.name += (n))
#else
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) do {} while (0)
#endif

/**
 * @internal Size of the mempool header, including the per-lcore caches
 * when a cache is configured.
 */
#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* Return a pointer to the object header (placed just before the object). */
static inline struct rte_mempool_objhdr *
rte_mempool_get_header(void *obj)
/* Return a pointer to the object trailer (placed just after the object). */
static inline struct rte_mempool_objtlr *
rte_mempool_get_trailer(void *obj)
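/*
 * Example (illustrative sketch, not part of this header): creating a pool
 * whose default put/get paths are single-producer/single-consumer by passing
 * the flags defined above to rte_mempool_create(). The sizes are example
 * values.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("spsc_pool", 4096, 512, 64, 0,
 *				NULL, NULL, NULL, NULL,
 *				rte_socket_id(),
 *				RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET);
 *	if (mp == NULL)
 *		return -1; // hypothetical error handling
 */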
/**
 * @internal Check and update object cookies, or panic.
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
#endif
/**
 * @internal Check contiguous object blocks and update object cookies, or panic.
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
						free)
#else
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	do {} while (0)
#endif

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/**
 * Prototype for implementation specific data provisioning function
 * (allocate the backing store of the pool).
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/** Enqueue an object into the external pool. */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

/** Dequeue an object from the external pool. */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/** Dequeue a number of contiguous object blocks from the external pool. */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n);
/**
 * Calculate memory size required to store the given number of objects.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);
/**
 * @internal Helper to calculate the memory size required to store the given
 * number of objects, assuming objects are not split between memory chunks.
 */
ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
		size_t *min_chunk_size, size_t *align);
/**
 * Default way to calculate the memory size required to store the given
 * number of objects.
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);
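/*
 * Example (illustrative sketch, not part of this header): a driver's
 * calc_mem_size callback can delegate to the helper above when it only needs
 * to reserve some extra space per memory chunk. The function name
 * my_drv_calc_mem_size and the 64-byte reserve are placeholders.
 *
 *	static ssize_t
 *	my_drv_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
 *			uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
 *	{
 *		// Reserve 64 bytes per chunk for hypothetical driver metadata.
 *		return rte_mempool_op_calc_mem_size_helper(mp, obj_num,
 *				pg_shift, 64, min_chunk_size, align);
 *	}
 */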
/**
 * Function to populate mempool objects using the provided memory chunk.
 */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/** Align objects on addresses multiple of total_elt_sz. */
#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001

/**
 * @internal Helper to populate mempool objects using the provided memory
 * chunk: just slice objects one by one.
 */
int rte_mempool_op_populate_helper(struct rte_mempool *mp,
		unsigned int flags, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Default way to populate mempool objects using the provided memory chunk.
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/** Structure defining mempool operations. */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;	/**< Allocate the backing store. */
	rte_mempool_free_t free;	/**< Free the backing store. */
	rte_mempool_enqueue_t enqueue;	/**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;	/**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objects. */
	rte_mempool_calc_mem_size_t calc_mem_size; /**< Optional memory size calculation. */
	rte_mempool_populate_t populate; /**< Optional object population. */
	rte_mempool_get_info_t get_info; /**< Get mempool info. */
	/** Dequeue a number of contiguous object blocks. */
	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;

/** Max registered ops structs. */
#define RTE_MEMPOOL_MAX_OPS_IDX 16

/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;	/**< Spinlock for add/delete. */
	uint32_t num_ops;	/**< Number of used ops structs in the table. */
	/** Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/**
 * @internal Get the mempool ops struct from its index.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
/**
 * @internal Wrapper for mempool_ops dequeue callback.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->dequeue(mp, obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
	}
	return ret;
}
/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops enqueue callback.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->enqueue(mp, obj_table, n);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (unlikely(ret < 0))
		RTE_LOG(CRIT, MEMPOOL,
			"cannot enqueue %u objects to mempool %s\n",
			n, mp->name);
#endif
	return ret;
}

/**
 * @internal Wrapper for mempool_ops get_count callback.
 */
unsigned rte_mempool_ops_get_count(const struct rte_mempool *mp);
/**
 * @internal Wrapper for mempool_ops calc_mem_size callback.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);
/**
 * @internal Wrapper for mempool_ops populate callback.
 */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/**
 * Macro to statically register the ops of a mempool handler.
 */
#define RTE_MEMPOOL_REGISTER_OPS(ops)			\
	RTE_INIT(mp_hdlr_init_##ops)			\
	{						\
		rte_mempool_register_ops(&ops);		\
	}

/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
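/*
 * Example (illustrative sketch, not part of this header): a minimal custom
 * handler registered with RTE_MEMPOOL_REGISTER_OPS() and later selected with
 * rte_mempool_set_ops_byname(). The my_ops_* callbacks and the "my_handler"
 * name are placeholders; a real handler must implement them against its own
 * backing store.
 *
 *	static const struct rte_mempool_ops my_ops = {
 *		.name = "my_handler",
 *		.alloc = my_ops_alloc,
 *		.free = my_ops_free,
 *		.enqueue = my_ops_enqueue,
 *		.dequeue = my_ops_dequeue,
 *		.get_count = my_ops_get_count,
 *	};
 *	RTE_MEMPOOL_REGISTER_OPS(my_ops);
 *
 *	// At pool creation time, before the pool is populated:
 *	// rte_mempool_set_ops_byname(mp, "my_handler", NULL);
 */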
/**
 * Get a pointer to the per-lcore default mempool cache.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	rte_mempool_trace_default_cache(mp, lcore_id,
		&mp->local_cache[lcore_id]);
	return &mp->local_cache[lcore_id];
}

/**
 * Flush a user-owned mempool cache to the specified mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
			struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;

	rte_mempool_trace_cache_flush(cache, mp);
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}
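/*
 * Example (illustrative sketch, not part of this header): using a user-owned
 * cache with the generic get/put API, e.g. from a thread that has no default
 * per-lcore cache. The pool pointer "mp" and the sizes are placeholders.
 *
 *	struct rte_mempool_cache *uc;
 *	void *objs[32];
 *
 *	uc = rte_mempool_cache_create(64, rte_socket_id());
 *	if (uc == NULL)
 *		return; // hypothetical error handling
 *
 *	if (rte_mempool_generic_get(mp, objs, 32, uc) == 0) {
 *		// ... use the 32 objects ...
 *		rte_mempool_generic_put(mp, objs, 32, uc);
 *	}
 *
 *	rte_mempool_cache_flush(uc, mp);	// return cached objects to the pool
 *	rte_mempool_cache_free(uc);
 */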
/**
 * @internal Put several objects back in the mempool; used internally.
 */
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* No cache provided? */
	if (unlikely(cache == NULL))
		goto driver_enqueue;

	/* Increment stats now; adding to the mempool always succeeds. */
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);

	/* The request itself is too big for the cache. */
	if (unlikely(n > cache->flushthresh))
		goto driver_enqueue_stats_incremented;

	/*
	 * If the objects fit without crossing the flush threshold, append
	 * them to the cache; otherwise flush the cache to the backend first.
	 */
	if (cache->len + n <= cache->flushthresh) {
		cache_objs = &cache->objs[cache->len];
		cache->len += n;
	} else {
		cache_objs = &cache->objs[0];
		rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
		cache->len = n;
	}

	/* Add the objects to the cache. */
	rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);

	return;

driver_enqueue:

	/* Increment stats now; adding to the mempool always succeeds. */
	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);

driver_enqueue_stats_incremented:

	/* Push the objects to the backend. */
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
}
/**
 * Put several objects back in the mempool.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
	rte_mempool_do_generic_put(mp, obj_table, n, cache);
}

/**
 * Put several objects back in the mempool, using the default per-lcore cache.
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned int n)
{
	struct rte_mempool_cache *cache;

	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
	rte_mempool_generic_put(mp, obj_table, n, cache);
}
/**
 * @internal Get several objects from the mempool; used internally.
 *
 * @return
 *   - 0: Success.
 *   - <0: Error; code of the driver dequeue function.
 */
static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	unsigned int remaining;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided? */
	if (unlikely(cache == NULL)) {
		remaining = n;
		goto driver_dequeue;
	}

	/* The cache is a stack, so the copy is done in reverse order. */
	cache_objs = &cache->objs[cache->len];

	if (__extension__(__builtin_constant_p(n)) && n <= cache->len) {
		/*
		 * The request size is known at build time and the whole
		 * request fits in the cache; let the compiler unroll the
		 * fixed-length copy loop.
		 */
		cache->len -= n;
		for (index = 0; index < n; index++)
			*obj_table++ = *--cache_objs;

		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

		return 0;
	}

	/*
	 * Use the cache as much as possible, to return hot objects first.
	 * If n is known at build time, the comparison above guarantees
	 * n > cache->len here, so RTE_MIN() can be omitted.
	 */
	len = __extension__(__builtin_constant_p(n)) ? cache->len :
			RTE_MIN(n, cache->len);
	cache->len -= len;
	remaining = n - len;
	for (index = 0; index < len; index++)
		*obj_table++ = *--cache_objs;

	/* The entire request was satisfied from the cache. */
	if (!__extension__(__builtin_constant_p(n)) && remaining == 0) {
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

		return 0;
	}

	/* The dequeue below would overflow the memory allocated for the cache. */
	if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto driver_dequeue;

	/* Fill the cache from the backend; fetch size + remaining objects. */
	ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
			cache->size + remaining);
	if (unlikely(ret < 0)) {
		/*
		 * We are buffer constrained; do not fill the cache, just
		 * satisfy the remaining part of the request directly from
		 * the backend.
		 */
		goto driver_dequeue;
	}

	/* Satisfy the remaining part of the request from the filled cache. */
	cache_objs = &cache->objs[cache->size + remaining];
	for (index = 0; index < remaining; index++)
		*obj_table++ = *--cache_objs;

	cache->len = cache->size;

	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

	return 0;

driver_dequeue:

	/* Get the remaining objects directly from the backend. */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);

	if (ret < 0) {
		if (likely(cache != NULL)) {
			cache->len = n - remaining;
			/*
			 * No further rollback is required: the objects still
			 * in the cache are intact, and the backend dequeued
			 * nothing.
			 */
		}

		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
	} else {
		if (likely(cache != NULL)) {
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
		} else {
			RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
			RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
		}
	}

	return ret;
}
/**
 * Get several objects from the mempool.
 *
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;

	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
	return ret;
}

/**
 * Get several objects from the mempool, using the default per-lcore cache.
 */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;

	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}
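/*
 * Example (illustrative sketch, not part of this header): a typical
 * burst-oriented get/put cycle on the current lcore. The pool pointer "mp"
 * and the burst size are placeholders.
 *
 *	void *objs[32];
 *
 *	if (rte_mempool_get_bulk(mp, objs, 32) == 0) {
 *		// ... work on the 32 objects ...
 *		rte_mempool_put_bulk(mp, objs, 32);
 *	}
 *	// On failure nothing is retrieved, so there is nothing to put back.
 */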
/**
 * Get a contiguous block of objects from the mempool.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
			      void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
							1);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
	}

	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
	return ret;
}
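/*
 * Example (illustrative sketch, not part of this header): contiguous-block
 * dequeue only works when the pool's driver implements dequeue_contig_blocks;
 * the block size can be queried with rte_mempool_ops_get_info(). The pool
 * pointer "mp" is a placeholder.
 *
 *	struct rte_mempool_info info;
 *	void *first_objs[4];
 *
 *	if (rte_mempool_ops_get_info(mp, &info) == 0 &&
 *			info.contig_block_size > 0 &&
 *			rte_mempool_get_contig_blocks(mp, first_objs, 4) == 0) {
 *		// Each entry of first_objs points to the first object of a
 *		// contiguous block of info.contig_block_size objects.
 *	}
 */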
/**
 * @internal Get the page size used for mempool object allocation.
 */
int
rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
/**
 * Mempool life cycle event callback.
 */
typedef void (rte_mempool_event_callback)(
		enum rte_mempool_event event,
		struct rte_mempool *mp,
		void *user_data);

/**
 * @internal Register a callback invoked on mempool life cycle events.
 */
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
				    void *user_data);

/**
 * @internal Unregister a callback added with
 * rte_mempool_event_callback_register().
 */
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
				      void *user_data);
/** A list of object headers type. */
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

/** Additional information about the mempool. */
struct rte_mempool_info {
	/** Number of objects in the contiguous block. */
	unsigned int contig_block_size;
};

/** Free callback for memory chunks added to a mempool. */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/** Free the opaque private data stored by the mempool handler. */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/** Return the number of available objects in the external pool. */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/** Get some additional information about a mempool. */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
		struct rte_mempool_info *info);

/** A mempool constructor callback type. */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/** A memory chunk callback function for mempool. */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp, void *opaque,
		struct rte_mempool_memhdr *memhdr, unsigned mem_idx);

/** Called for each object at population time. */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *vaddr, rte_iova_t iova);

/* Ops registration and selection. */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
int rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
	void *pool_config);
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
	struct rte_mempool_info *info);

/* Mempool creation, population and destruction. */
struct rte_mempool *rte_mempool_create(const char *name, unsigned n,
	unsigned elt_size, unsigned cache_size, unsigned private_data_size,
	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
	int socket_id, unsigned flags);
struct rte_mempool *rte_mempool_create_empty(const char *name, unsigned n,
	unsigned elt_size, unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);
void rte_mempool_free(struct rte_mempool *mp);
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);
int rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t len,
	size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
int rte_mempool_populate_default(struct rte_mempool *mp);
int rte_mempool_populate_anon(struct rte_mempool *mp);

/* Iteration, lookup and debug helpers. */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
struct rte_mempool *rte_mempool_lookup(const char *name);
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
void rte_mempool_list_dump(FILE *f);
void rte_mempool_audit(struct rte_mempool *mp);
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
	void *arg);

/* Object counts. */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
unsigned int rte_mempool_in_use_count(const struct rte_mempool *mp);
static int rte_mempool_full(const struct rte_mempool *mp);
static int rte_mempool_empty(const struct rte_mempool *mp);

/* User-owned cache management. */
struct rte_mempool_cache *rte_mempool_cache_create(uint32_t size,
	int socket_id);
void rte_mempool_cache_free(struct rte_mempool_cache *cache);

/* Single-object get/put wrappers. */
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp,
	void **obj_p);
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp,
	void *obj);

/* Miscellaneous helpers. */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);
static struct rte_mempool *rte_mempool_from_obj(void *obj);
static rte_iova_t rte_mempool_virt2iova(const void *elt);
static void *rte_mempool_get_priv(struct rte_mempool *mp);
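/*
 * Example (illustrative sketch, not part of this header): the two-step
 * creation path, equivalent to rte_mempool_create() but letting the handler
 * be chosen explicitly before the pool is populated. The sizes, the
 * "ring_mp_mc" handler name and my_obj_init are example values/placeholders.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("example_pool", 8192, 2048, 256, 0,
 *				      rte_socket_id(), 0);
 *	if (mp == NULL)
 *		return -1; // hypothetical error handling
 *
 *	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
 *			rte_mempool_populate_default(mp) < 0) {
 *		rte_mempool_free(mp);
 *		return -1;
 *	}
 *
 *	// Optionally run a per-object initializer over the populated objects.
 *	rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */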