DPDK 16.04.0
rte_mempool.h
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef _RTE_MEMPOOL_H_
35 #define _RTE_MEMPOOL_H_
36 
62 #include <stdio.h>
63 #include <stdlib.h>
64 #include <stdint.h>
65 #include <errno.h>
66 #include <inttypes.h>
67 #include <sys/queue.h>
68 
69 #include <rte_log.h>
70 #include <rte_debug.h>
71 #include <rte_lcore.h>
72 #include <rte_memory.h>
73 #include <rte_branch_prediction.h>
74 #include <rte_ring.h>
75 
76 #ifdef __cplusplus
77 extern "C" {
78 #endif
79 
80 #define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL
81 #define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL
82 #define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL
84 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
85 
88 struct rte_mempool_debug_stats {
89  uint64_t put_bulk;
90  uint64_t put_objs;
91  uint64_t get_success_bulk;
92  uint64_t get_success_objs;
93  uint64_t get_fail_bulk;
94  uint64_t get_fail_objs;
95 } __rte_cache_aligned;
96 #endif
97 
98 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
99 
102 struct rte_mempool_cache {
103  unsigned len;
104  /*
105  * Cache is allocated to this size to allow it to overflow in certain
106  * cases to avoid needless emptying of cache.
107  */
108  void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3];
109 } __rte_cache_aligned;
110 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
111 
115 struct rte_mempool_objsz {
116  uint32_t elt_size;
117  uint32_t header_size;
118  uint32_t trailer_size;
119  uint32_t total_size;
121 };
122 
123 #define RTE_MEMPOOL_NAMESIZE 32
124 #define RTE_MEMPOOL_MZ_PREFIX "MP_"
125 
126 /* "MP_<name>" */
127 #define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
128 
129 #ifdef RTE_LIBRTE_XEN_DOM0
130 
131 /* "<name>_MP_elt" */
132 #define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
133 
134 #else
135 
136 #define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT
137 
138 #endif /* RTE_LIBRTE_XEN_DOM0 */
139 
140 #define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
141 
143 #define MEMPOOL_PG_NUM_DEFAULT 1
144 
145 #ifndef RTE_MEMPOOL_ALIGN
146 #define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE
147 #endif
148 
149 #define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
150 
159 struct rte_mempool_objhdr {
160  struct rte_mempool *mp;
161 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
162  uint64_t cookie;
163 #endif
164 };
165 
172 struct rte_mempool_objtlr {
173 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
174  uint64_t cookie;
175 #endif
176 };
177 
181 struct rte_mempool {
182  char name[RTE_MEMPOOL_NAMESIZE];
183  struct rte_ring *ring;
184  phys_addr_t phys_addr;
185  int flags;
186  uint32_t size;
187  uint32_t cache_size;
188  uint32_t cache_flushthresh;
191  uint32_t elt_size;
192  uint32_t header_size;
193  uint32_t trailer_size;
195  unsigned private_data_size;
197 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
198 
199  struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
200 #endif
201 
202 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
203 
204  struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
205 #endif
206 
207  /* Address translation support, starts from next cache line. */
208 
210  uint32_t pg_num __rte_cache_aligned;
211  uint32_t pg_shift;
212  uintptr_t pg_mask;
213  uintptr_t elt_va_start;
215  uintptr_t elt_va_end;
217  phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
219 } __rte_cache_aligned;
221 
222 #define MEMPOOL_F_NO_SPREAD 0x0001
223 #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002
224 #define MEMPOOL_F_SP_PUT 0x0004
225 #define MEMPOOL_F_SC_GET 0x0008
237 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
238 #define __MEMPOOL_STAT_ADD(mp, name, n) do { \
239  unsigned __lcore_id = rte_lcore_id(); \
240  if (__lcore_id < RTE_MAX_LCORE) { \
241  mp->stats[__lcore_id].name##_objs += n; \
242  mp->stats[__lcore_id].name##_bulk += 1; \
243  } \
244  } while(0)
245 #else
246 #define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
247 #endif
248 
257 #define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \
258  RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
259  sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
260 
264 #define MEMPOOL_IS_CONTIG(mp) \
265  ((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
266  (mp)->phys_addr == (mp)->elt_pa[0])
267 
268 /* return the header of a mempool object (internal) */
269 static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
270 {
271  return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, sizeof(struct rte_mempool_objhdr));
272 }
273 
283 static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
284 {
285  struct rte_mempool_objhdr *hdr = __mempool_get_header(obj);
286  return hdr->mp;
287 }
288 
289 /* return the trailer of a mempool object (internal) */
290 static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
291 {
292  struct rte_mempool *mp = rte_mempool_from_obj(obj);
293  return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
294 }
295 
310 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
311 #ifndef __INTEL_COMPILER
312 #pragma GCC diagnostic ignored "-Wcast-qual"
313 #endif
314 static inline void __mempool_check_cookies(const struct rte_mempool *mp,
315  void * const *obj_table_const,
316  unsigned n, int free)
317 {
318  struct rte_mempool_objhdr *hdr;
319  struct rte_mempool_objtlr *tlr;
320  uint64_t cookie;
321  void *tmp;
322  void *obj;
323  void **obj_table;
324 
325  /* Force to drop the "const" attribute. This is done only when
326  * DEBUG is enabled */
327  tmp = (void *) obj_table_const;
328  obj_table = (void **) tmp;
329 
330  while (n--) {
331  obj = obj_table[n];
332 
333  if (rte_mempool_from_obj(obj) != mp)
334  rte_panic("MEMPOOL: object is owned by another "
335  "mempool\n");
336 
337  hdr = __mempool_get_header(obj);
338  cookie = hdr->cookie;
339 
340  if (free == 0) {
341  if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
343  RTE_LOG(CRIT, MEMPOOL,
344  "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
345  obj, (const void *) mp, cookie);
346  rte_panic("MEMPOOL: bad header cookie (put)\n");
347  }
348  hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
349  }
350  else if (free == 1) {
351  if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
353  RTE_LOG(CRIT, MEMPOOL,
354  "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
355  obj, (const void *) mp, cookie);
356  rte_panic("MEMPOOL: bad header cookie (get)\n");
357  }
358  hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
359  }
360  else if (free == 2) {
361  if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
362  cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
364  RTE_LOG(CRIT, MEMPOOL,
365  "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
366  obj, (const void *) mp, cookie);
367  rte_panic("MEMPOOL: bad header cookie (audit)\n");
368  }
369  }
370  tlr = __mempool_get_trailer(obj);
371  cookie = tlr->cookie;
372  if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
374  RTE_LOG(CRIT, MEMPOOL,
375  "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
376  obj, (const void *) mp, cookie);
377  rte_panic("MEMPOOL: bad trailer cookie\n");
378  }
379  }
380 }
381 #ifndef __INTEL_COMPILER
382 #pragma GCC diagnostic error "-Wcast-qual"
383 #endif
384 #else
385 #define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
386 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
387 
391 typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
392  void * /*obj_start*/,
393  void * /*obj_end*/,
394  uint32_t /*obj_index */);
395 
431 uint32_t rte_mempool_obj_iter(void *vaddr,
432  uint32_t elt_num, size_t elt_sz, size_t align,
433  const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
434  rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg);
435 
443 typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
444  void *, unsigned);
445 
452 typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
453 
535 struct rte_mempool *
536 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
537  unsigned cache_size, unsigned private_data_size,
538  rte_mempool_ctor_t *mp_init, void *mp_init_arg,
539  rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
540  int socket_id, unsigned flags);
541 
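A minimal usage sketch for rte_mempool_create(). The pool name, element count, element size and cache size below are arbitrary illustration values, not requirements of this API:

#include <rte_mempool.h>
#include <rte_lcore.h>

/* Create a pool of 8191 fixed-size 2 KB objects with a 256-entry
 * per-lcore cache on the caller's NUMA socket. NULL constructors leave
 * the pool private area and the objects uninitialized. */
static struct rte_mempool *
example_pool_create(void)
{
	return rte_mempool_create("example_pool",
			8191,            /* number of elements; 2^q - 1 is optimal */
			2048,            /* size of each element in bytes */
			256,             /* per-lcore cache size */
			0,               /* private data size */
			NULL, NULL,      /* mempool constructor and its argument */
			NULL, NULL,      /* per-object constructor and its argument */
			rte_socket_id(), /* NUMA socket to allocate memory on */
			0);              /* flags */
}

The caller should check the returned pointer for NULL (with rte_errno set on failure) before using the pool.
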
635 struct rte_mempool *
636 rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
637  unsigned cache_size, unsigned private_data_size,
638  rte_mempool_ctor_t *mp_init, void *mp_init_arg,
639  rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
640  int socket_id, unsigned flags, void *vaddr,
641  const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
642 
724 struct rte_mempool *
725 rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
726  unsigned cache_size, unsigned private_data_size,
727  rte_mempool_ctor_t *mp_init, void *mp_init_arg,
728  rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
729  int socket_id, unsigned flags);
730 
731 
740 void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
741 
754 static inline void __attribute__((always_inline))
755 __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
756  unsigned n, int is_mp)
757 {
758 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
759  struct rte_mempool_cache *cache;
760  uint32_t index;
761  void **cache_objs;
762  unsigned lcore_id = rte_lcore_id();
763  uint32_t cache_size = mp->cache_size;
764  uint32_t flushthresh = mp->cache_flushthresh;
765 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
766 
767  /* increment stat now, a put in the mempool always succeeds */
768  __MEMPOOL_STAT_ADD(mp, put, n);
769 
770 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
771  /* cache is not enabled or single producer or non-EAL thread */
772  if (unlikely(cache_size == 0 || is_mp == 0 ||
773  lcore_id >= RTE_MAX_LCORE))
774  goto ring_enqueue;
775 
776  /* Go straight to ring if put would overflow mem allocated for cache */
777  if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
778  goto ring_enqueue;
779 
780  cache = &mp->local_cache[lcore_id];
781  cache_objs = &cache->objs[cache->len];
782 
783  /*
784  * The cache follows this algorithm:
785  * 1. Add the objects to the cache.
786  * 2. If the cache length crosses the flush threshold, everything
787  * above cache_size is flushed to the ring.
788  */
789 
790  /* Add elements back into the cache */
791  for (index = 0; index < n; ++index, obj_table++)
792  cache_objs[index] = *obj_table;
793 
794  cache->len += n;
795 
796  if (cache->len >= flushthresh) {
797  rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
798  cache->len - cache_size);
799  cache->len = cache_size;
800  }
801 
802  return;
803 
804 ring_enqueue:
805 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
806 
807  /* push remaining objects in ring */
808 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
809  if (is_mp) {
810  if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
811  rte_panic("cannot put objects in mempool\n");
812  }
813  else {
814  if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
815  rte_panic("cannot put objects in mempool\n");
816  }
817 #else
818  if (is_mp)
819  rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
820  else
821  rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
822 #endif
823 }
824 
825 
836 static inline void __attribute__((always_inline))
837 rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
838  unsigned n)
839 {
840  __mempool_check_cookies(mp, obj_table, n, 0);
841  __mempool_put_bulk(mp, obj_table, n, 1);
842 }
843 
854 static inline void
855 rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
856  unsigned n)
857 {
858  __mempool_check_cookies(mp, obj_table, n, 0);
859  __mempool_put_bulk(mp, obj_table, n, 0);
860 }
861 
876 static inline void __attribute__((always_inline))
877 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
878  unsigned n)
879 {
880  __mempool_check_cookies(mp, obj_table, n, 0);
881  __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
882 }
883 
892 static inline void __attribute__((always_inline))
893 rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
894 {
895  rte_mempool_mp_put_bulk(mp, &obj, 1);
896 }
897 
906 static inline void __attribute__((always_inline))
907 rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
908 {
909  rte_mempool_sp_put_bulk(mp, &obj, 1);
910 }
911 
924 static inline void __attribute__((always_inline))
925 rte_mempool_put(struct rte_mempool *mp, void *obj)
926 {
927  rte_mempool_put_bulk(mp, &obj, 1);
928 }
929 
944 static inline int __attribute__((always_inline))
945 __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
946  unsigned n, int is_mc)
947 {
948  int ret;
949 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
950  struct rte_mempool_cache *cache;
951  uint32_t index, len;
952  void **cache_objs;
953  unsigned lcore_id = rte_lcore_id();
954  uint32_t cache_size = mp->cache_size;
955 
956  /* cache is not enabled or single consumer */
957  if (unlikely(cache_size == 0 || is_mc == 0 ||
958  n >= cache_size || lcore_id >= RTE_MAX_LCORE))
959  goto ring_dequeue;
960 
961  cache = &mp->local_cache[lcore_id];
962  cache_objs = cache->objs;
963 
964  /* Can this be satisfied from the cache? */
965  if (cache->len < n) {
966  /* No. Backfill the cache first, and then fill from it */
967  uint32_t req = n + (cache_size - cache->len);
968 
969  /* How many do we require i.e. number to fill the cache + the request */
970  ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
971  if (unlikely(ret < 0)) {
972  /*
973  * In the off chance that we are buffer constrained,
974  * where we are not able to allocate cache + n, go to
975  * the ring directly. If that fails, we are truly out of
976  * buffers.
977  */
978  goto ring_dequeue;
979  }
980 
981  cache->len += req;
982  }
983 
984  /* Now fill in the response ... */
985  for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
986  *obj_table = cache_objs[len];
987 
988  cache->len -= n;
989 
990  __MEMPOOL_STAT_ADD(mp, get_success, n);
991 
992  return 0;
993 
994 ring_dequeue:
995 #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
996 
997  /* get remaining objects from ring */
998  if (is_mc)
999  ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
1000  else
1001  ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
1002 
1003  if (ret < 0)
1004  __MEMPOOL_STAT_ADD(mp, get_fail, n);
1005  else
1006  __MEMPOOL_STAT_ADD(mp, get_success, n);
1007 
1008  return ret;
1009 }
1010 
1029 static inline int __attribute__((always_inline))
1030 rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
1031 {
1032  int ret;
1033  ret = __mempool_get_bulk(mp, obj_table, n, 1);
1034  if (ret == 0)
1035  __mempool_check_cookies(mp, obj_table, n, 1);
1036  return ret;
1037 }
1038 
1058 static inline int __attribute__((always_inline))
1059 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
1060 {
1061  int ret;
1062  ret = __mempool_get_bulk(mp, obj_table, n, 0);
1063  if (ret == 0)
1064  __mempool_check_cookies(mp, obj_table, n, 1);
1065  return ret;
1066 }
1067 
1090 static inline int __attribute__((always_inline))
1091 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
1092 {
1093  int ret;
1094  ret = __mempool_get_bulk(mp, obj_table, n,
1095  !(mp->flags & MEMPOOL_F_SC_GET));
1096  if (ret == 0)
1097  __mempool_check_cookies(mp, obj_table, n, 1);
1098  return ret;
1099 }
1100 
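A short sketch of bulk allocation and release through the flag-driven default variants above; the burst size and the bail-out policy are choices of the example, not of the API:

#include <rte_mempool.h>

#define EXAMPLE_BURST 32

static void
example_bulk(struct rte_mempool *mp)
{
	void *objs[EXAMPLE_BURST];

	/* rte_mempool_get_bulk() is all-or-nothing: it either fills the
	 * whole table and returns 0, or takes nothing and returns < 0. */
	if (rte_mempool_get_bulk(mp, objs, EXAMPLE_BURST) < 0)
		return; /* not enough objects available right now */

	/* ... use the objects ... */

	/* A put never fails. */
	rte_mempool_put_bulk(mp, objs, EXAMPLE_BURST);
}
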
1117 static inline int __attribute__((always_inline))
1118 rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
1119 {
1120  return rte_mempool_mc_get_bulk(mp, obj_p, 1);
1121 }
1122 
1139 static inline int __attribute__((always_inline))
1140 rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
1141 {
1142  return rte_mempool_sc_get_bulk(mp, obj_p, 1);
1143 }
1144 
1165 static inline int __attribute__((always_inline))
1166 rte_mempool_get(struct rte_mempool *mp, void **obj_p)
1167 {
1168  return rte_mempool_get_bulk(mp, obj_p, 1);
1169 }
1170 
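The single-object wrappers follow the same pattern (sketch):

static void
example_single(struct rte_mempool *mp)
{
	void *obj;

	if (rte_mempool_get(mp, &obj) < 0)
		return; /* mempool is exhausted */

	/* ... use obj ... */

	rte_mempool_put(mp, obj);
}
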
1183 unsigned rte_mempool_count(const struct rte_mempool *mp);
1184 
1202 static inline unsigned
1203 rte_mempool_free_count(const struct rte_mempool *mp)
1204 {
1205  return mp->size - rte_mempool_count(mp);
1206 }
1207 
1221 static inline int
1222 rte_mempool_full(const struct rte_mempool *mp)
1223 {
1224  return !!(rte_mempool_count(mp) == mp->size);
1225 }
1226 
1240 static inline int
1241 rte_mempool_empty(const struct rte_mempool *mp)
1242 {
1243  return !!(rte_mempool_count(mp) == 0);
1244 }
1245 
1256 static inline phys_addr_t
1257 rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
1258 {
1259  if (rte_eal_has_hugepages()) {
1260  uintptr_t off;
1261 
1262  off = (const char *)elt - (const char *)mp->elt_va_start;
1263  return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
1264  } else {
1265  /*
1266  * If huge pages are disabled, we cannot assume the
1267  * memory region to be physically contiguous.
1268  * Lookup for each element.
1269  */
1270  return rte_mem_virt2phy(elt);
1271  }
1272 }
1273 
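For instance, code that programs a DMA engine can translate an object it obtained from the pool; the descriptor write below is purely illustrative:

static void
example_dma_prepare(struct rte_mempool *mp)
{
	void *obj;
	phys_addr_t pa;

	if (rte_mempool_get(mp, &obj) < 0)
		return;

	pa = rte_mempool_virt2phy(mp, obj);
	(void)pa; /* e.g. store it into a hardware descriptor */

	rte_mempool_put(mp, obj);
}
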
1284 void rte_mempool_audit(const struct rte_mempool *mp);
1285 
1294 static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
1295 {
1296  return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
1297 }
1298 
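The private area is reserved by the private_data_size argument of rte_mempool_create() and is typically filled in by the pool constructor; a sketch with a hypothetical application structure:

struct example_pool_priv {
	uint32_t item_kind; /* hypothetical application field */
};

/* Suitable as the mp_init argument of rte_mempool_create(), with
 * private_data_size set to sizeof(struct example_pool_priv). */
static void
example_pool_init(struct rte_mempool *mp, void *arg)
{
	struct example_pool_priv *priv = rte_mempool_get_priv(mp);

	priv->item_kind = 0;
	(void)arg;
}
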
1305 void rte_mempool_list_dump(FILE *f);
1306 
1319 struct rte_mempool *rte_mempool_lookup(const char *name);
1320 
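A process that did not create the pool (for example a secondary process) can attach to it by name; the name matches the hypothetical one used in the creation sketch above:

static struct rte_mempool *
example_pool_attach(void)
{
	struct rte_mempool *mp = rte_mempool_lookup("example_pool");

	if (mp == NULL)
		rte_panic("mempool \"example_pool\" not found\n");
	return mp;
}
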
1338 uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
1339  struct rte_mempool_objsz *sz);
1340 
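A sketch of inspecting the per-object layout (header, element, trailer) that a pool of 2048-byte elements would use with default flags:

#include <stdio.h>
#include <inttypes.h>

static void
example_obj_size(void)
{
	struct rte_mempool_objsz sz;
	uint32_t total;

	total = rte_mempool_calc_obj_size(2048, 0, &sz);
	printf("header=%" PRIu32 " elt=%" PRIu32 " trailer=%" PRIu32
	       " total=%" PRIu32 "\n",
	       sz.header_size, sz.elt_size, sz.trailer_size, total);
}
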
1361 size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
1362  uint32_t pg_shift);
1363 
1390 ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
1391  const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
1392 
1401 void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
1402  void *arg);
1403 
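For example, a debug helper can walk every mempool in the system and report how many objects each one currently holds (sketch; names are illustrative):

static void
example_report_one(const struct rte_mempool *mp, void *arg)
{
	FILE *f = arg;

	fprintf(f, "%s: %u objects available\n",
		mp->name, rte_mempool_count(mp));
}

static void
example_report_all(FILE *f)
{
	rte_mempool_walk(example_report_one, f);
}
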
1404 #ifdef __cplusplus
1405 }
1406 #endif
1407 
1408 #endif /* _RTE_MEMPOOL_H_ */