#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_
#include <stdio.h>
#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#define RTE_MEMPOOL_HEADER_COOKIE1  0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;         /**< Number of puts. */
	uint64_t put_objs;         /**< Number of objects successfully put. */
	uint64_t get_success_bulk; /**< Successful allocation number. */
	uint64_t get_success_objs; /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;    /**< Failed allocation number. */
	uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
} __rte_cache_aligned;
#endif
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	uint32_t size;	      /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;	      /**< Current cache count */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
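/*
 * Usage sketch (illustrative, not a definitive recipe): besides the
 * per-lcore default caches embedded in the mempool, a caller may own a
 * cache explicitly and pass it to the generic get/put functions declared
 * later in this file; "mp" is assumed to be an existing mempool.
 *
 *	struct rte_mempool_cache *c;
 *
 *	c = rte_mempool_cache_create(256, rte_socket_id());
 *	if (c != NULL) {
 *		... rte_mempool_generic_get()/rte_mempool_generic_put()
 *		    called with c as the cache argument ...
 *		rte_mempool_cache_flush(c, mp);
 *		rte_mempool_cache_free(c);
 *	}
 */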
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define	RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"
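/*
 * Illustrative lookup (pool name "mbuf_pool" assumed): the memzone backing
 * a mempool is named by prepending the prefix, i.e. "MP_mbuf_pool":
 *
 *	char mz_name[RTE_MEMZONE_NAMESIZE];
 *	const struct rte_memzone *mz;
 *
 *	snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, "mbuf_pool");
 *	mz = rte_memzone_lookup(mz_name);
 */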
#define	MEMPOOL_PG_SHIFT_MAX	(sizeof(uintptr_t) * CHAR_BIT - 1)

/** Mempool over one chunk of physically continuous memory */
#define	MEMPOOL_PG_NUM_DEFAULT	1
#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)
/**
 * Mempool object header structure
 *
 * Each object stored in mempools is prefixed by this header structure.
 * It allows retrieving the mempool pointer from the object and iterating
 * over all objects attached to a mempool. When debug is enabled, a cookie
 * is also added in this structure, preventing corruptions and double-frees.
 */
struct rte_mempool_objhdr {
	STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	RTE_STD_C11
	union {
		rte_iova_t iova;         /**< IO address of the object. */
		phys_addr_t physaddr;    /**< deprecated - Physical address of the object. */
	};
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type
 */
STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in mempools is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
	uint64_t cookie;                 /**< Debug cookie. */
};

#endif

/**
 * A list of memory where objects are stored
 */
STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored. Each chunk is virtually
 * and physically contiguous.
 */
struct rte_mempool_memhdr {
	STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the chunk */
	void *addr;              /**< Virtual address of the chunk */
	RTE_STD_C11
	union {
		rte_iova_t iova;       /**< IO address of the chunk */
		phys_addr_t phys_addr; /**< Physical address of the chunk */
	};
	size_t len;              /**< length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
	void *opaque;            /**< Argument passed to the free callback */
};
/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	RTE_STD_C11
	union {
		void *pool_data;         /**< Ring or pool to store objects. */
		uint64_t pool_id;        /**< External mempool identifier. */
	};
	void *pool_config;               /**< optional args for ops alloc. */
	const struct rte_memzone *mz;    /**< Memzone where pool is alloc'd. */
	unsigned int flags;              /**< Flags of the mempool. */
	int socket_id;                   /**< Socket id passed at create. */
	uint32_t size;                   /**< Max size of the mempool. */
	uint32_t cache_size;
	/**< Size of per-lcore default local cache. */

	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of header (before elt). */
	uint32_t trailer_size;           /**< Size of trailer (after elt). */

	unsigned private_data_size;      /**< Size of private data. */
	/**
	 * Index into rte_mempool_ops_table array of mempool ops
	 * structs, which contain callback function pointers.
	 * We're using an index here rather than pointers to the callbacks
	 * to facilitate any secondary processes that may want to use
	 * this mempool.
	 */
	int32_t ops_index;

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	uint32_t populated_size;         /**< Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
	uint32_t nb_mem_chunks;          /**< Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
}  __rte_cache_aligned;
#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED   0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
/**
 * This capability flag is advertised by a mempool handler if the whole
 * memory area containing the objects must be physically contiguous.
 * Note: This flag should not be passed by application.
 */
#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
/**
 * This capability flag is advertised by a mempool handler. Used for a case
 * where the mempool driver wants object start addresses (vaddr) aligned to
 * the block size (total element size).
 *
 * Note:
 * - This flag should not be passed by application.
 *   Flag used for mempool driver only.
 * - Mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG flag along with
 *   this flag.
 */
#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
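/*
 * Illustrative create call (sizes assumed): a pool restricted to one
 * producer and one consumer thread, using the application-settable flags
 * above:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("sp_sc_pool", 1024, 2048, 32, 0,
 *		NULL, NULL, NULL, NULL, rte_socket_id(),
 *		MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
 */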
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name##_objs += n;	\
			mp->stats[__lcore_id].name##_bulk += 1;	\
		}						\
	} while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif
/**
 * Calculate the size of the mempool header.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param cs
 *   Size of the per-lcore cache.
 */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
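/*
 * Worked example (illustrative): with cs == 0 the header is just
 * sizeof(struct rte_mempool). With any non-zero cs, one cache slot is
 * reserved for every possible lcore, adding
 * sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE bytes regardless of
 * the value of cs itself.
 */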
/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);

	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);

	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __mempool_check_cookies(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
/**
 * Prototype for implementation specific data provisioning function.
 *
 * The function should provide the implementation specific memory for
 * use by the other mempool ops functions in a given mempool ops struct.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);
/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
/**
 * Get the mempool capabilities.
 */
typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
		unsigned int *flags);

/**
 * Notify new memory area to mempool.
 */
typedef int (*rte_mempool_ops_register_memory_area_t)
(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);

/** Structure defining mempool operations structure */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
	/**
	 * Get the mempool capabilities
	 */
	rte_mempool_get_capabilities_t get_capabilities;
	/**
	 * Notify new memory area to mempool
	 */
	rte_mempool_ops_register_memory_area_t register_memory_area;
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 * Each process has its own storage for this ops struct array so that
 * the mempools can be shared across primary and secondary processes.
 * The indices used to access the array are valid across processes, whereas
 * any function pointers stored directly in the mempool struct would not be.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
	uint32_t num_ops;      /**< Number of used ops structs in the table. */
	/**
	 * Storage for all possible ops structs.
	 */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;
/**
 * Get the ops struct corresponding to the given ops index.
 *
 * @param ops_index
 *   The index of the ops struct in the ops struct table. It must be a valid
 *   index: (0 <= idx < num_ops).
 * @return
 *   The pointer to the ops struct in the table.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}
/**
 * @internal Wrapper for mempool_ops dequeue callback.
 *
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of dequeue function.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->dequeue(mp, obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops enqueue callback.
 *
 * @return
 *   - 0: Success; n objects supplied.
 *   - <0: Error; code of enqueue function.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	return ops->enqueue(mp, obj_table, n);
}
/**
 * @internal Wrapper for mempool_ops get_count callback.
 *
 * @return
 *   The number of available objects in the external pool.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
/**
 * @internal Wrapper for mempool_ops get_capabilities callback.
 *
 * @return
 *   - 0: Success; the mempool driver has advertised its pool capabilities.
 *   - -ENOTSUP: the driver doesn't support the get_capabilities op.
 */
int
rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
					unsigned int *flags);
/**
 * @internal Wrapper for mempool_ops register_memory_area callback.
 */
int
rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
				char *vaddr, rte_iova_t iova, size_t len);
/**
 * Register mempool operations.
 *
 * @return
 *   - >=0: Success; return the index of the ops struct in the table.
 *   - -EINVAL: some missing callbacks while registering ops struct.
 *   - -ENOSPC: the maximum number of ops structs has been reached.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 * Note that rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX handlers are registered.
 */
#define MEMPOOL_REGISTER_OPS(ops)					\
	void mp_hdlr_init_##ops(void);					\
	void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
	{								\
		rte_mempool_register_ops(&ops);				\
	}
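/*
 * Registration sketch (callbacks and the "my_handler" name are assumed,
 * error handling omitted): a custom handler fills an ops struct and
 * registers it once at startup through the constructor emitted by
 * MEMPOOL_REGISTER_OPS().
 *
 *	static const struct rte_mempool_ops my_ops = {
 *		.name = "my_handler",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *	};
 *	MEMPOOL_REGISTER_OPS(my_ops);
 *
 * An application would then select it with
 * rte_mempool_set_ops_byname(mp, "my_handler", NULL) before populating
 * the pool.
 */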
/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
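/*
 * Illustrative callback (my_obj_init is assumed): zero each object and
 * stamp its index at creation time, passed as the obj_init argument of
 * rte_mempool_create():
 *
 *	static void
 *	my_obj_init(struct rte_mempool *mp, void *opaque, void *obj,
 *		unsigned obj_idx)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *		*(unsigned *)obj = obj_idx;
 *	}
 */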
/**
 * Create a new mempool named *name* in memory, with the objects stored
 * in an externally provided memory buffer that may consist of a set of
 * disjoint physical pages described by iova[], pg_num and pg_shift.
 */
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags, char *vaddr,
		const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift);
/**
 * Add physically contiguous memory for objects in the pool at init
 *
 * Add a virtually and physically contiguous memory chunk in the pool
 * where objects can be instantiated.
 */
int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
	phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);
/**
 * Add virtually contiguous memory for objects in the pool at init
 *
 * Add a virtually contiguous memory chunk in the pool where objects can
 * be instantiated. The IO addresses corresponding to the virtual area
 * are described by iova[], pg_num and pg_shift.
 */
int
rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
	const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
	const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
	rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
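/*
 * Illustrative flow (sizes assumed, error handling omitted): the populate
 * functions above are used on a pool created empty; most applications can
 * let rte_mempool_populate_default() pick the memory instead of calling
 * the phys/iova variants directly:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("pool", 8192, 2048, 256, 0,
 *		rte_socket_id(), 0);
 *	rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
 *	rte_mempool_populate_default(mp);
 */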
/**
 * Flush a user-owned mempool cache to the specified mempool.
 *
 * @param cache
 *   A pointer to the mempool cache.
 * @param mp
 *   A pointer to the mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
			struct rte_mempool *mp)
{
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}
/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @return
 *   A pointer to the mempool cache, or NULL if the cache is disabled or
 *   the lcore id is out of range.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	return &mp->local_cache[lcore_id];
}
/**
 * @internal Put several objects back in the mempool; used internally.
 */
static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* increment stat now, adding in mempool always success */
	__MEMPOOL_STAT_ADD(mp, put, n);

	/* No cache provided or if put would overflow mem allocated for cache */
	if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto ring_enqueue;

	cache_objs = &cache->objs[cache->len];

	/*
	 * The cache follows the following algorithm
	 *   1. Add the objects to the cache
	 *   2. Anything greater than the cache min value (if it crosses the
	 *   cache flush threshold) is flushed to the ring.
	 */

	/* Add elements back into the cache */
	rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);

	cache->len += n;

	if (cache->len >= cache->flushthresh) {
		rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
				cache->len - cache->size);
		cache->len = cache->size;
	}

	return;

ring_enqueue:

	/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
		rte_panic("cannot put objects in mempool\n");
#else
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
/**
 * Put several objects back in the mempool.
 *
 * @param cache
 *   A pointer to a mempool cache structure. May be NULL if not needed.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	__mempool_check_cookies(mp, obj_table, n, 0);
	__mempool_generic_put(mp, obj_table, n, cache);
}
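/*
 * Illustrative put (burst of 32 assumed): returning objects through the
 * calling lcore's default cache:
 *
 *	void *burst[32];
 *	... burst[] filled by an earlier get ...
 *	rte_mempool_generic_put(mp, burst, 32,
 *		rte_mempool_default_cache(mp, rte_lcore_id()));
 */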
/**
 * @internal Get several objects from the mempool; used internally.
 *
 * @return
 *   - >=0: Success; number of objects supplied.
 *   - <0: Error; code of ring dequeue function.
 */
static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
		      unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided or cannot be satisfied from cache */
	if (unlikely(cache == NULL || n >= cache->size))
		goto ring_dequeue;

	cache_objs = cache->objs;

	/* Can this be satisfied from the cache? */
	if (cache->len < n) {
		/* No. Backfill the cache first, and then fill from it */
		uint32_t req = n + (cache->size - cache->len);

		/* How many do we require i.e. number to fill the cache + the request */
		ret = rte_mempool_ops_dequeue_bulk(mp,
			&cache->objs[cache->len], req);
		if (unlikely(ret < 0)) {
			/*
			 * In the off chance that we are buffer constrained,
			 * where we are not able to allocate cache + n, go to
			 * the ring directly. If that fails, we are truly out
			 * of buffers.
			 */
			goto ring_dequeue;
		}

		cache->len += req;
	}

	/* Now fill in the response ... */
	for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	cache->len -= n;

	__MEMPOOL_STAT_ADD(mp, get_success, n);

	return 0;

ring_dequeue:

	/* get remaining objects from ring */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);

	if (ret < 0)
		__MEMPOOL_STAT_ADD(mp, get_fail, n);
	else
		__MEMPOOL_STAT_ADD(mp, get_success, n);

	return ret;
}
/**
 * Get several objects from the mempool.
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool. Note that it can return -ENOENT when
 * the local cache and common pool are empty, even if caches from other
 * lcores are full.
 *
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;

	ret = __mempool_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		__mempool_check_cookies(mp, obj_table, n, 1);
	return ret;
}
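/*
 * Illustrative round trip (burst of 32 assumed): allocate from the pool
 * through the lcore's default cache, use the objects, then return them:
 *
 *	void *objs[32];
 *	struct rte_mempool_cache *cache;
 *
 *	cache = rte_mempool_default_cache(mp, rte_lcore_id());
 *	if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
 *		... use the 32 objects ...
 *		rte_mempool_generic_put(mp, objs, 32, cache);
 *	}
 */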
/**
 * Calculate the maximum amount of memory required to store given number
 * of objects. Assume that the memory buffer will be aligned at page
 * boundary.
 */
size_t
rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
	uint32_t pg_shift, unsigned int flags);
/**
 * Calculate how much memory would be actually required with the given
 * memory footprint to store the required number of objects.
 *
 * @return
 *   On success, the number of bytes needed to store the given number of
 *   objects, aligned to the given page size. If the provided memory buffer
 *   is too small, return a negative value whose absolute value is the
 *   actual number of elements that can be stored in that buffer.
 */
ssize_t
rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t total_elt_sz,
	const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
	unsigned int flags);
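/*
 * Illustrative sizing (values assumed): an external buffer for 1024
 * objects of 2048 bytes total element size on 4 KiB pages
 * (pg_shift == 12):
 *
 *	size_t sz = rte_mempool_xmem_size(1024, 2048, 12, 0);
 *	... reserve sz bytes, build the iova[] page table, then hand both
 *	    to rte_mempool_xmem_create() or rte_mempool_populate_iova_tab() ...
 */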