DPDK 2.1.0
rte_mempool.h
/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

struct rte_mempool_debug_stats {
        uint64_t put_bulk;
        uint64_t put_objs;
        uint64_t get_success_bulk;
        uint64_t get_success_objs;
        uint64_t get_fail_bulk;
        uint64_t get_fail_objs;
} __rte_cache_aligned;
#endif

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0

struct rte_mempool_cache {
        unsigned len;
        /*
         * Cache is allocated to this size to allow it to overflow in certain
         * cases to avoid needless emptying of cache.
         */
        void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3];
} __rte_cache_aligned;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */

struct rte_mempool_objsz {
        uint32_t elt_size;
        uint32_t header_size;
        uint32_t trailer_size;
        uint32_t total_size;
};

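/*
 * Illustrative note on the fields above (a sketch of the relation, assuming
 * the usual layout computed by rte_mempool_calc_obj_size()): total_size is
 * the complete per-object footprint, i.e.
 *
 *   total_size = header_size + elt_size + trailer_size
 *
 * where header_size and trailer_size already include any alignment padding.
 */
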
#define RTE_MEMPOOL_NAMESIZE 32
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"

#ifdef RTE_LIBRTE_XEN_DOM0

/* "<name>_MP_elt" */
#define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt"

#else

#define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT

#endif /* RTE_LIBRTE_XEN_DOM0 */

#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)

#define MEMPOOL_PG_NUM_DEFAULT 1

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)

struct rte_mempool_objhdr {
        struct rte_mempool *mp;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        uint64_t cookie;
#endif
};

struct rte_mempool_objtlr {
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        uint64_t cookie;
#endif
};

struct rte_mempool {
        char name[RTE_MEMPOOL_NAMESIZE];
        struct rte_ring *ring;
        phys_addr_t phys_addr;
        int flags;
        uint32_t size;
        uint32_t cache_size;
        uint32_t cache_flushthresh;

        uint32_t elt_size;
        uint32_t header_size;
        uint32_t trailer_size;

        unsigned private_data_size;

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0

        struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
#endif

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

        struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif

        /* Address translation support, starts from next cache line. */

        uint32_t pg_num __rte_cache_aligned;
        uint32_t pg_shift;
        uintptr_t pg_mask;
        uintptr_t elt_va_start;
        uintptr_t elt_va_end;
        phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
} __rte_cache_aligned;

#define MEMPOOL_F_NO_SPREAD      0x0001
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002
#define MEMPOOL_F_SP_PUT         0x0004
#define MEMPOOL_F_SC_GET         0x0008

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define __MEMPOOL_STAT_ADD(mp, name, n) do {                    \
                unsigned __lcore_id = rte_lcore_id();           \
                if (__lcore_id < RTE_MAX_LCORE) {               \
                        mp->stats[__lcore_id].name##_objs += n; \
                        mp->stats[__lcore_id].name##_bulk += 1; \
                }                                               \
        } while(0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
#endif

#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \
        RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
        sizeof((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))

#define MEMPOOL_IS_CONTIG(mp) \
        ((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
        (mp)->phys_addr == (mp)->elt_pa[0])

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
        return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
                sizeof(struct rte_mempool_objhdr));
}

static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
        struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
        return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
{
        struct rte_mempool *mp = rte_mempool_from_obj(obj);
        return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

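/*
 * Object layout implied by the helpers above (descriptive sketch, not a
 * declaration from the original header): each element is stored as
 *
 *   [struct rte_mempool_objhdr][object data, elt_size bytes][struct rte_mempool_objtlr]
 *
 * The pointer handed to the application is the start of the object data;
 * the header is found by stepping back sizeof(struct rte_mempool_objhdr)
 * bytes and the trailer by stepping forward mp->elt_size bytes.
 */
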
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
static inline void __mempool_check_cookies(const struct rte_mempool *mp,
                                           void * const *obj_table_const,
                                           unsigned n, int free)
{
        struct rte_mempool_objhdr *hdr;
        struct rte_mempool_objtlr *tlr;
        uint64_t cookie;
        void *tmp;
        void *obj;
        void **obj_table;

        /* Force to drop the "const" attribute. This is done only when
         * DEBUG is enabled */
        tmp = (void *) obj_table_const;
        obj_table = (void **) tmp;

        while (n--) {
                obj = obj_table[n];

                if (rte_mempool_from_obj(obj) != mp)
                        rte_panic("MEMPOOL: object is owned by another "
                                  "mempool\n");

                hdr = __mempool_get_header(obj);
                cookie = hdr->cookie;

                if (free == 0) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
                                RTE_LOG(CRIT, MEMPOOL,
                                        "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                        obj, (const void *) mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (put)\n");
                        }
                        hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
                } else if (free == 1) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
                                RTE_LOG(CRIT, MEMPOOL,
                                        "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                        obj, (const void *) mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (get)\n");
                        }
                        hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
                } else if (free == 2) {
                        if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
                            cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
                                RTE_LOG(CRIT, MEMPOOL,
                                        "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                        obj, (const void *) mp, cookie);
                                rte_panic("MEMPOOL: bad header cookie (audit)\n");
                        }
                }
                tlr = __mempool_get_trailer(obj);
                cookie = tlr->cookie;
                if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
                        RTE_LOG(CRIT, MEMPOOL,
                                "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
                                obj, (const void *) mp, cookie);
                        rte_panic("MEMPOOL: bad trailer cookie\n");
                }
        }
}
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic error "-Wcast-qual"
#endif
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
        void * /*obj_start*/,
        void * /*obj_end*/,
        uint32_t /*obj_index*/);

uint32_t rte_mempool_obj_iter(void *vaddr,
        uint32_t elt_num, size_t elt_sz, size_t align,
        const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
        rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg);

typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
        void *, unsigned);

typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
                   unsigned cache_size, unsigned private_data_size,
                   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
                   rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
                   int socket_id, unsigned flags);

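/*
 * Illustrative use of rte_mempool_create() above (a sketch, not part of the
 * original header; pool name, element count, element size and cache size are
 * arbitrary example values):
 *
 *   struct rte_mempool *mp;
 *   void *obj;
 *
 *   mp = rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *                           NULL, NULL, NULL, NULL,
 *                           rte_socket_id(), 0);
 *   if (mp == NULL)
 *       rte_panic("cannot create example mempool\n");
 *
 *   if (rte_mempool_get(mp, &obj) == 0)
 *       rte_mempool_put(mp, obj);
 *
 * The four NULL arguments skip the pool and per-object constructors, and
 * rte_socket_id() places the pool on the caller's NUMA socket.
 */
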
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
                        unsigned cache_size, unsigned private_data_size,
                        rte_mempool_ctor_t *mp_init, void *mp_init_arg,
                        rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
                        int socket_id, unsigned flags, void *vaddr,
                        const phys_addr_t paddr[], uint32_t pg_num,
                        uint32_t pg_shift);

#ifdef RTE_LIBRTE_XEN_DOM0

struct rte_mempool *
rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
                        unsigned cache_size, unsigned private_data_size,
                        rte_mempool_ctor_t *mp_init, void *mp_init_arg,
                        rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
                        int socket_id, unsigned flags);
#endif

void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);

static inline void __attribute__((always_inline))
__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
                   unsigned n, int is_mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
        struct rte_mempool_cache *cache;
        uint32_t index;
        void **cache_objs;
        unsigned lcore_id = rte_lcore_id();
        uint32_t cache_size = mp->cache_size;
        uint32_t flushthresh = mp->cache_flushthresh;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */

        /* increment stat now, as adding to the mempool always succeeds */
        __MEMPOOL_STAT_ADD(mp, put, n);

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
        /* cache is not enabled, single producer, or non-EAL thread */
        if (unlikely(cache_size == 0 || is_mp == 0 ||
                     lcore_id >= RTE_MAX_LCORE))
                goto ring_enqueue;

        /* Go straight to ring if put would overflow mem allocated for cache */
        if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
                goto ring_enqueue;

        cache = &mp->local_cache[lcore_id];
        cache_objs = &cache->objs[cache->len];

        /*
         * The cache works as follows:
         * 1. Add the objects to the cache.
         * 2. If the cache length crosses the flush threshold, everything
         *    above cache_size is flushed to the ring.
         */

        /* Add elements back into the cache */
        for (index = 0; index < n; ++index, obj_table++)
                cache_objs[index] = *obj_table;

        cache->len += n;

        if (cache->len >= flushthresh) {
                rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
                                         cache->len - cache_size);
                cache->len = cache_size;
        }

        return;

ring_enqueue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */

        /* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
        if (is_mp) {
                if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
                        rte_panic("cannot put objects in mempool\n");
        } else {
                if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
                        rte_panic("cannot put objects in mempool\n");
        }
#else
        if (is_mp)
                rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
        else
                rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
#endif
}

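/*
 * Worked example for the cache-flush logic in __mempool_put_bulk() above
 * (a sketch; the flush threshold itself is set when the mempool is created,
 * in the mempool C code, not in this header): with cache_size = 256 and
 * cache_flushthresh = 384, a put that raises cache->len to 400 enqueues the
 * 400 - 256 = 144 excess objects (&cache->objs[256] onwards) to the ring and
 * leaves cache->len at 256.
 */
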
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
                        unsigned n)
{
        __mempool_check_cookies(mp, obj_table, n, 0);
        __mempool_put_bulk(mp, obj_table, n, 1);
}

static inline void
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
                        unsigned n)
{
        __mempool_check_cookies(mp, obj_table, n, 0);
        __mempool_put_bulk(mp, obj_table, n, 0);
}

static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
                     unsigned n)
{
        __mempool_check_cookies(mp, obj_table, n, 0);
        __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
}

static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
        rte_mempool_mp_put_bulk(mp, &obj, 1);
}

static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
        rte_mempool_sp_put_bulk(mp, &obj, 1);
}

static inline void __attribute__((always_inline))
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
        rte_mempool_put_bulk(mp, &obj, 1);
}

static inline int __attribute__((always_inline))
__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
                   unsigned n, int is_mc)
{
        int ret;
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
        struct rte_mempool_cache *cache;
        uint32_t index, len;
        void **cache_objs;
        unsigned lcore_id = rte_lcore_id();
        uint32_t cache_size = mp->cache_size;

        /* cache is not enabled or single consumer */
        if (unlikely(cache_size == 0 || is_mc == 0 ||
                     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
                goto ring_dequeue;

        cache = &mp->local_cache[lcore_id];
        cache_objs = cache->objs;

        /* Can this be satisfied from the cache? */
        if (cache->len < n) {
                /* No. Backfill the cache first, then fill from it. */
                /* Number needed to refill the cache plus the request itself */
                uint32_t req = n + (cache_size - cache->len);

                ret = rte_ring_mc_dequeue_bulk(mp->ring,
                                               &cache->objs[cache->len], req);
                if (unlikely(ret < 0)) {
                        /*
                         * In the off chance that we are buffer constrained
                         * and cannot allocate cache + n objects, go to the
                         * ring directly. If that fails, we are truly out of
                         * buffers.
                         */
                        goto ring_dequeue;
                }

                cache->len += req;
        }

        /* Now fill in the response ... */
        for (index = 0, len = cache->len - 1; index < n;
             ++index, len--, obj_table++)
                *obj_table = cache_objs[len];

        cache->len -= n;

        __MEMPOOL_STAT_ADD(mp, get_success, n);

        return 0;

ring_dequeue:
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */

        /* get remaining objects from ring */
        if (is_mc)
                ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
        else
                ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);

        if (ret < 0)
                __MEMPOOL_STAT_ADD(mp, get_fail, n);
        else
                __MEMPOOL_STAT_ADD(mp, get_success, n);

        return ret;
}

static inline int __attribute__((always_inline))
rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
        int ret;
        ret = __mempool_get_bulk(mp, obj_table, n, 1);
        if (ret == 0)
                __mempool_check_cookies(mp, obj_table, n, 1);
        return ret;
}

static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
        int ret;
        ret = __mempool_get_bulk(mp, obj_table, n, 0);
        if (ret == 0)
                __mempool_check_cookies(mp, obj_table, n, 1);
        return ret;
}

static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
        int ret;
        ret = __mempool_get_bulk(mp, obj_table, n,
                                 !(mp->flags & MEMPOOL_F_SC_GET));
        if (ret == 0)
                __mempool_check_cookies(mp, obj_table, n, 1);
        return ret;
}

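/*
 * Illustrative use of rte_mempool_get_bulk() above (a sketch, not part of
 * the original header; "mp" is assumed to be an existing pool). The call is
 * all-or-nothing: on failure no object is retrieved and the table is left
 * untouched.
 *
 *   void *objs[32];
 *
 *   if (rte_mempool_get_bulk(mp, objs, 32) < 0) {
 *       ... handle the shortage, e.g. retry later or drop the burst ...
 *   } else {
 *       ... use the 32 objects, then return them ...
 *       rte_mempool_put_bulk(mp, objs, 32);
 *   }
 */
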
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
        return rte_mempool_mc_get_bulk(mp, obj_p, 1);
}

static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
        return rte_mempool_sc_get_bulk(mp, obj_p, 1);
}

static inline int __attribute__((always_inline))
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
        return rte_mempool_get_bulk(mp, obj_p, 1);
}

unsigned rte_mempool_count(const struct rte_mempool *mp);

static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
        return mp->size - rte_mempool_count(mp);
}

static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
        return !!(rte_mempool_count(mp) == mp->size);
}

static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
        return !!(rte_mempool_count(mp) == 0);
}

static inline phys_addr_t
rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
{
        if (rte_eal_has_hugepages()) {
                uintptr_t off;

                off = (const char *)elt - (const char *)mp->elt_va_start;
                return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
        } else {
                /*
                 * If huge pages are disabled, we cannot assume the
                 * memory region to be physically contiguous.
                 * Look up each element individually.
                 */
                return rte_mem_virt2phy(elt);
        }
}

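/*
 * Illustrative use of rte_mempool_virt2phy() above (a sketch, not part of
 * the original header; "mp" is assumed to be an existing pool), e.g. to
 * obtain an address that can be programmed into a DMA descriptor:
 *
 *   void *obj;
 *   phys_addr_t pa;
 *
 *   if (rte_mempool_get(mp, &obj) == 0) {
 *       pa = rte_mempool_virt2phy(mp, obj);
 *       ... hand pa to the hardware, then eventually ...
 *       rte_mempool_put(mp, obj);
 *   }
 */
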
void rte_mempool_audit(const struct rte_mempool *mp);

static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
        return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
}

void rte_mempool_list_dump(FILE *f);

struct rte_mempool *rte_mempool_lookup(const char *name);

uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
        struct rte_mempool_objsz *sz);

size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
        uint32_t pg_shift);

ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
        const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);

void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
                      void *arg);

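/*
 * Illustrative use of rte_mempool_walk() above (a sketch, not part of the
 * original header; the callback name is an arbitrary example): dump every
 * mempool in the system to a stream.
 *
 *   static void
 *   dump_one_mempool(const struct rte_mempool *mp, void *arg)
 *   {
 *       rte_mempool_dump((FILE *)arg, mp);
 *   }
 *
 *   rte_mempool_walk(dump_one_mempool, stdout);
 */
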
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */