35 #ifndef _RTE_MEMPOOL_H_
36 #define _RTE_MEMPOOL_H_
68 #include <sys/queue.h>
/*
 * Magic "cookie" values written into each object's header and trailer.
 * They are verified by rte_mempool_check_cookies() (declared below) to
 * detect buffer under-runs / over-runs around mempool objects.
 * COOKIE1 marks a free object's header, COOKIE2 an allocated one —
 * NOTE(review): inferred from upstream DPDK; confirm against full source.
 */
84 #define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL
85 #define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL
86 #define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL
88 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/*
 * Per-lcore statistics, compiled in only for debug builds and updated
 * through the __MEMPOOL_STAT_ADD() macro below. Each operation bumps a
 * "*_bulk" counter once and a "*_objs" counter by the object count.
 * NOTE(review): excerpt — the put_* counters and the closing brace of
 * this struct are not visible here.
 */
92 struct rte_mempool_debug_stats {
/* Successful dequeue operations (one per bulk call). */
95 uint64_t get_success_bulk;
/* Objects successfully dequeued in total. */
96 uint64_t get_success_objs;
/* Failed dequeue operations (one per bulk call). */
97 uint64_t get_fail_bulk;
/* Objects that failed to be dequeued in total. */
98 uint64_t get_fail_objs;
/*
 * Per-lcore cache object array (member of struct rte_mempool_cache —
 * the struct's header is not visible in this excerpt). Presumably sized
 * 3x the maximum cache size so a put burst can overflow past the flush
 * threshold before being drained — TODO confirm against full source.
 */
113 void *
objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3];
/*
 * Maximum pool name length: the name must fit inside a ring name once
 * the "MP_" memzone prefix is prepended (the +1 reclaims the prefix's
 * terminating NUL counted by sizeof).
 */
128 #define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
129 sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
/* Prefix and printf-style format used to build a pool's memzone name. */
130 #define RTE_MEMPOOL_MZ_PREFIX "MP_"
133 #define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
/* Largest usable page shift: one bit less than the pointer width. */
135 #define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
/* Default number of physical pages backing a pool's memory. */
138 #define MEMPOOL_PG_NUM_DEFAULT 1
/*
 * Object alignment; build-time overridable, defaults to one cache line.
 * NOTE(review): the matching #endif is not visible in this excerpt.
 */
140 #ifndef RTE_MEMPOOL_ALIGN
141 #define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
/* Low-bit mask; assumes RTE_MEMPOOL_ALIGN is a power of two. */
144 #define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
/*
 * NOTE(review): the following lines are fragments — the object header
 * structure and the debug-only cookie fields normally guarded by these
 * #ifdefs are not visible in this excerpt.
 */
159 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
169 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/* Trailer placed after each object (body elided in this excerpt). */
177 struct rte_mempool_objtlr {
/* Debug build only: per-lcore stats array embedded in struct rte_mempool. */
254 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
256 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
/* Pool creation flags (bitwise OR-combinable). */
/* Do not spread objects among memory channels. */
260 #define MEMPOOL_F_NO_SPREAD 0x0001
/* Do not align objects on cache lines. */
261 #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002
/* Default put is "single-producer". */
262 #define MEMPOOL_F_SP_PUT 0x0004
/* Default get is "single-consumer". */
263 #define MEMPOOL_F_SC_GET 0x0008
/* Internal: set once the backing pool ops allocator has run. */
264 #define MEMPOOL_F_POOL_CREATED 0x0010
/* Objects are not required to be physically contiguous. */
265 #define MEMPOOL_F_NO_PHYS_CONTIG 0x0020
277 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/*
 * Debug builds: bump this lcore's "<name>_objs" counter by n and its
 * "<name>_bulk" counter by one. Only counts when called from an EAL
 * thread with a valid lcore id.
 * NOTE(review): excerpt — the macro's closing braces and the #else
 * line are not visible here; only the empty non-debug body follows.
 */
278 #define __MEMPOOL_STAT_ADD(mp, name, n) do { \
279 unsigned __lcore_id = rte_lcore_id(); \
280 if (__lcore_id < RTE_MAX_LCORE) { \
281 mp->stats[__lcore_id].name##_objs += n; \
282 mp->stats[__lcore_id].name##_bulk += 1; \
/* Non-debug builds: statistics collection compiles to nothing. */
286 #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
/*
 * Size of the pool header: the rte_mempool structure itself plus, when
 * the pool has a per-lcore cache (cs != 0), one cache slot for every
 * possible lcore.
 */
297 #define MEMPOOL_HEADER_SIZE(mp, cs) \
298 (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
299 (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/*
 * Return a pointer to the trailer that follows the given object.
 * NOTE(review): body elided in this excerpt.
 */
324 static inline struct rte_mempool_objtlr *__mempool_get_trailer(
void *obj)
/*
 * Verify the header/trailer cookies of n objects from obj_table_const;
 * 'free' selects the expected object state. Debug aid — presumably
 * panics on corruption (TODO confirm; implementation not shown).
 */
344 void rte_mempool_check_cookies(
const struct rte_mempool *mp,
345 void *
const *obj_table_const,
unsigned n,
int free);
347 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/* Debug builds forward to the real checker ... */
348 #define __mempool_check_cookies(mp, obj_table_const, n, free) \
349 rte_mempool_check_cookies(mp, obj_table_const, n, free)
/* ... otherwise the check compiles to nothing. */
351 #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
/* Maximum length of a registered ops (pool handler) name. */
354 #define RTE_MEMPOOL_OPS_NAMESIZE 32
/* Handler callback: allocate the private backing store for a pool. */
366 typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
/*
 * NOTE(review): the next lines are the parameter tails of the enqueue
 * (const object table) and dequeue (writable object table) handler
 * callback typedefs; their opening lines are not visible here.
 */
377 void *
const *obj_table,
unsigned int n);
383 void **obj_table,
unsigned int n);
/* Maximum number of ops structs that can be registered. */
400 #define RTE_MEMPOOL_MAX_OPS_IDX 16
/* Global registry of mempool ops (body elided in this excerpt). */
411 struct rte_mempool_ops_table {
/* Look up a registered ops struct by its index (body elided). */
433 rte_mempool_get_ops(
int ops_index)
/*
 * Dequeue n objects from the pool's backing store into obj_table by
 * dispatching to the pool's registered ops. Returns the handler's
 * result (0 on success by convention — TODO confirm).
 */
467 void **obj_table,
unsigned n)
471 ops = rte_mempool_get_ops(mp->
ops_index);
472 return ops->
dequeue(mp, obj_table, n);
/*
 * Enqueue n objects from obj_table back into the pool's backing store
 * via the registered ops; mirror of the dequeue wrapper above.
 */
489 rte_mempool_ops_enqueue_bulk(
struct rte_mempool *mp,
void *
const *obj_table,
494 ops = rte_mempool_get_ops(mp->
ops_index);
495 return ops->
enqueue(mp, obj_table, n);
/* Ask the pool's ops how many objects its backing store holds. */
507 rte_mempool_ops_get_count(
const struct rte_mempool *mp);
/*
 * Register a pool handler at program start-up: expands to a constructor
 * function that calls rte_mempool_register_ops(&ops) before main().
 * NOTE(review): excerpt — the constructor's brace lines are not
 * visible here.
 */
556 #define MEMPOOL_REGISTER_OPS(ops) \
557 void mp_hdlr_init_##ops(void); \
558 void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
560 rte_mempool_register_ops(&ops); \
/*
 * NOTE(review): parameter tail of the per-object iteration callback
 * typedef (opaque user pointer, object pointer, object index); the
 * typedef's opening line is not visible here.
 */
569 void *opaque,
void *obj,
unsigned obj_idx);
/*
 * NOTE(review): parameter tails of the physical-memory (xmem) pool
 * creation/size prototypes: page address table, page count, page shift.
 */
740 const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
852 const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
/*
 * Flush a user-owned cache: push every cached object pointer back to
 * the pool's backing store (body partially elided in this excerpt).
 */
994 static inline void __attribute__((always_inline))
998 rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
/*
 * Return this lcore's default cache, or NULL when the pool was created
 * without caching or the caller is not an EAL thread.
 */
1015 if (mp->cache_size == 0)
1018 if (lcore_id >= RTE_MAX_LCORE)
1021 return &mp->local_cache[lcore_id];
/*
 * Internal put fast path: stage the n object pointers in the per-lcore
 * cache, draining the excess to the backing store once the cache grows
 * past its flush threshold.
 * NOTE(review): excerpt — several interior lines (goto labels, the
 * #else of the debug branch, closing braces) are not visible here.
 */
1039 static inline void __attribute__((always_inline))
1040 __mempool_generic_put(struct
rte_mempool *mp,
void * const *obj_table,
/* Count the put regardless of which path is taken below. */
1046 __MEMPOOL_STAT_ADD(mp, put, n);
/* No usable cache, or burst too large to ever fit: bypass the cache. */
1049 if (
unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
/* Append the new pointers after the currently cached ones. */
1052 cache_objs = &cache->objs[cache->len];
1062 rte_memcpy(&cache_objs[0], obj_table,
sizeof(
void *) * n);
/* Cache above its flush threshold: drain it back down to 'size'. */
1066 if (cache->len >= cache->flushthresh) {
1067 rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
1068 cache->len - cache->size);
1069 cache->len = cache->size;
/*
 * Cache-bypass path. In debug builds a failed enqueue is fatal:
 * objects would otherwise be leaked.
 */
1077 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1078 if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
1079 rte_panic(
"cannot put objects in mempool\n");
1081 rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
/*
 * Public generic put: verify cookies (debug builds only), then defer
 * to the internal fast path above.
 */
1101 static inline void __attribute__((always_inline))
1106 __mempool_check_cookies(mp, obj_table, n, 0);
1107 __mempool_generic_put(mp, obj_table, n, cache);
/*
 * NOTE(review): the following lines are only the attribute headers of
 * the mempool put convenience wrappers (bulk/single, default-cache and
 * flag-driven variants); their names and bodies are not visible in
 * this excerpt.
 */
1122 static inline void __attribute__((always_inline))
1143 static inline void __attribute__((always_inline))
1164 static inline void __attribute__((always_inline))
1183 static inline void __attribute__((always_inline))
1201 static inline void __attribute__((always_inline))
1219 static inline void __attribute__((always_inline))
1243 __mempool_generic_get(struct
rte_mempool *mp,
void **obj_table,
1247 uint32_t index,
len;
1251 if (
unlikely(cache == NULL || n >= cache->size))
1254 cache_objs = cache->objs;
1257 if (cache->len < n) {
1259 uint32_t req = n + (cache->size - cache->len);
1262 ret = rte_mempool_ops_dequeue_bulk(mp,
1263 &cache->objs[cache->len], req);
1278 for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
1279 *obj_table = cache_objs[
len];
1283 __MEMPOOL_STAT_ADD(mp, get_success, n);
1290 ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
1293 __MEMPOOL_STAT_ADD(mp, get_fail, n);
1295 __MEMPOOL_STAT_ADD(mp, get_success, n);
1323 static inline int __attribute__((always_inline))
1328 ret = __mempool_generic_get(mp, obj_table, n, cache);
1330 __mempool_check_cookies(mp, obj_table, n, 1);
/*
 * NOTE(review): the following lines are only the attribute headers of
 * the mempool get convenience wrappers (bulk/single, default-cache and
 * flag-driven variants); their names and bodies are not visible in
 * this excerpt.
 */
1354 static inline int __attribute__((always_inline))
1383 static inline int __attribute__((always_inline))
1412 static inline int __attribute__((always_inline))
1438 static inline int __attribute__((always_inline))
1464 static inline int __attribute__((always_inline))
1490 static inline int __attribute__((always_inline))
/* Header of the "available object count" style helper (body elided). */
1560 static inline unsigned
/*
 * NOTE(review): parameter tail of rte_mempool_xmem_usage-style
 * prototype (total element size, page address table, page count);
 * the opening line is not visible here.
 */
1745 size_t total_elt_sz,
const phys_addr_t paddr[], uint32_t pg_num,