42#include <rte_config.h>
/*
 * Sentinel (cookie) values written around each object when mempool
 * debugging is compiled in, used to detect buffer over- and under-runs.
 * NOTE(review): COOKIE1 vs COOKIE2 presumably distinguish whether the
 * object is owned by the pool or by the application -- confirm against
 * the cookie-check implementation.
 */
#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL
63#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
70struct rte_mempool_debug_stats {
73 uint64_t put_common_pool_bulk;
74 uint64_t put_common_pool_objs;
75 uint64_t get_common_pool_bulk;
76 uint64_t get_common_pool_objs;
77 uint64_t get_success_bulk;
78 uint64_t get_success_objs;
79 uint64_t get_fail_bulk;
80 uint64_t get_fail_objs;
81 uint64_t get_success_blks;
82 uint64_t get_fail_blks;
97 void *
objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3];
/*
 * Maximum length of a mempool name: it must still fit in a ring name
 * after the memzone prefix below is prepended.  The "+ 1" compensates
 * for the NUL terminator counted by sizeof(RTE_MEMPOOL_MZ_PREFIX).
 */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)

/* Prefix applied to a mempool's name to build its memzone name. */
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* printf-style format producing the full memzone name ("MP_<name>"). */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"
/*
 * Legacy page-geometry constants, retained only for backward
 * compatibility.  RTE_DEPRECATED presumably emits a deprecation
 * warning at each use site -- its definition is not visible here.
 * PG_SHIFT_MAX evaluates to the highest representable bit shift of a
 * pointer-sized integer (e.g. 63 on a 64-bit target).
 */
#define MEMPOOL_PG_SHIFT_MAX \
	RTE_DEPRECATED(MEMPOOL_PG_SHIFT_MAX) (sizeof(uintptr_t) * CHAR_BIT - 1)
#define MEMPOOL_PG_NUM_DEFAULT RTE_DEPRECATED(MEMPOOL_PG_NUM_DEFAULT) 1
125#ifndef RTE_MEMPOOL_ALIGN
/*
 * Default alignment of mempool objects: one cache line, unless the
 * build overrides it (see the #ifndef guard above this definition).
 */
#define RTE_MEMPOOL_ALIGN RTE_CACHE_LINE_SIZE

/* Mask form of the alignment, for rounding addresses and sizes. */
#define RTE_MEMPOOL_ALIGN_MASK (RTE_MEMPOOL_ALIGN - 1)
147#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
157#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
165struct rte_mempool_objtlr {
213 char name[RTE_MEMPOOL_NAMESIZE];
248#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
250 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
/*
 * Mempool flag bits.  The unprefixed MEMPOOL_F_* names are legacy
 * aliases of the RTE_MEMPOOL_F_* names, kept for backward
 * compatibility.
 * NOTE(review): the one-line summaries below are inferred from the
 * flag names -- confirm against the DPDK mempool documentation.
 */
/* Do not spread objects across memory channels. */
#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
/* Do not cache-line-align objects. */
#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
/* Puts are single-producer. */
#define RTE_MEMPOOL_F_SP_PUT 0x0004
#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
/* Gets are single-consumer. */
#define RTE_MEMPOOL_F_SC_GET 0x0008
#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
/* Internal: backing pool object has been created. */
#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
/* Objects need not be IOVA-contiguous. */
#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
/* Internal: pool is not usable for device I/O. */
#define RTE_MEMPOOL_F_NON_IO 0x0040
297#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
298 | RTE_MEMPOOL_F_NO_CACHE_ALIGN \
299 | RTE_MEMPOOL_F_SP_PUT \
300 | RTE_MEMPOOL_F_SC_GET \
301 | RTE_MEMPOOL_F_NO_IOVA_CONTIG \
313#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
314#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do { \
315 unsigned __lcore_id = rte_lcore_id(); \
316 if (__lcore_id < RTE_MAX_LCORE) { \
317 mp->stats[__lcore_id].name += n; \
321#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
/*
 * Size of the mempool header region: the rte_mempool structure itself
 * plus, when a per-lcore cache is requested (cs != 0), one
 * rte_mempool_cache slot for every possible lcore.
 */
#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* Deprecated alias, kept for backward compatibility. */
#define MEMPOOL_HEADER_SIZE(mp, cs) \
	RTE_DEPRECATED(MEMPOOL_HEADER_SIZE) RTE_MEMPOOL_HEADER_SIZE(mp, cs)
342rte_mempool_get_header(
void *obj)
364static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(
void *obj)
/**
 * Check the debug cookies of a table of objects (effective only when
 * mempool debugging is compiled in; see the wrapper macro below).
 *
 * @param mp              Mempool the objects belong to.
 * @param obj_table_const Array of pointers to the objects to check.
 * @param n               Number of objects in the array.
 * @param free            Expected ownership/free state of the objects.
 *                        NOTE(review): exact encoding not visible in
 *                        this header -- confirm in the implementation.
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);
387#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
388#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
389 rte_mempool_check_cookies(mp, obj_table_const, n, free)
391#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
/**
 * Check the debug cookies of contiguous blocks of objects (effective
 * only when mempool debugging is compiled in).
 *
 * @param mp                    Mempool the blocks belong to.
 * @param first_obj_table_const Array of pointers to the first object of
 *                              each contiguous block.
 * @param n                     Number of blocks in the array.
 * @param free                  Expected ownership/free state.
 *                              NOTE(review): encoding not visible here
 *                              -- confirm in the implementation.
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);
412#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
413#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
415 rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
418#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
/* Maximum length of an ops-table name (including NUL terminator). */
#define RTE_MEMPOOL_OPS_NAMESIZE 32
446 void *
const *obj_table,
unsigned int n);
452 void **obj_table,
unsigned int n);
458 void **first_obj_table,
unsigned int n);
489 uint32_t obj_num, uint32_t pg_shift,
490 size_t *min_chunk_size,
size_t *align);
/**
 * Helper computing the memory required to store a given number of
 * objects, with a per-chunk reservation.
 *
 * @param mp             Mempool the calculation is for.
 * @param obj_num        Number of objects to store.
 * @param pg_shift       log2 of the page size constraining chunks.
 * @param chunk_reserve  Bytes reserved at the start of each chunk.
 * @param min_chunk_size Output: minimum usable chunk size.
 * @param align          Output: required chunk alignment.
 * @return Required memory size, negative on error.
 * NOTE(review): parameter semantics inferred from names/types --
 * confirm against the DPDK mempool ops documentation.
 */
ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
	uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
	size_t *min_chunk_size, size_t *align);
539 uint32_t obj_num, uint32_t pg_shift,
540 size_t *min_chunk_size,
size_t *align);
586 unsigned int max_objs,
/* Populate-helper flag: align objects (name-based summary -- confirm). */
#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
627int rte_mempool_op_populate_helper(
struct rte_mempool *mp,
628 unsigned int flags,
unsigned int max_objs,
639 unsigned int max_objs,
/* Maximum number of registered mempool ops tables. */
#define RTE_MEMPOOL_MAX_OPS_IDX 16
711rte_mempool_get_ops(
int ops_index)
744rte_mempool_ops_dequeue_bulk(
struct rte_mempool *mp,
745 void **obj_table,
unsigned n)
750 rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
751 ops = rte_mempool_get_ops(mp->
ops_index);
752 ret = ops->
dequeue(mp, obj_table, n);
754 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
755 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
774rte_mempool_ops_dequeue_contig_blocks(
struct rte_mempool *mp,
775 void **first_obj_table,
unsigned int n)
779 ops = rte_mempool_get_ops(mp->
ops_index);
781 rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
799rte_mempool_ops_enqueue_bulk(
struct rte_mempool *mp,
void *
const *obj_table,
804 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
805 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
806 rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
807 ops = rte_mempool_get_ops(mp->
ops_index);
808 return ops->
enqueue(mp, obj_table, n);
820rte_mempool_ops_get_count(
const struct rte_mempool *mp);
/**
 * Dispatch to the pool's ops table to compute the memory required to
 * store a given number of objects.
 *
 * @param mp             Mempool the calculation is for.
 * @param obj_num        Number of objects to store.
 * @param pg_shift       log2 of the page size constraining chunks.
 * @param min_chunk_size Output: minimum usable chunk size.
 * @param align          Output: required chunk alignment.
 * @return Required memory size, negative on error.
 * NOTE(review): semantics inferred from names/types -- confirm against
 * the DPDK mempool ops documentation.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
	uint32_t obj_num, uint32_t pg_shift,
	size_t *min_chunk_size, size_t *align);
868int rte_mempool_ops_populate(
struct rte_mempool *mp,
unsigned int max_objs,
935#define RTE_MEMPOOL_REGISTER_OPS(ops) \
936 RTE_INIT(mp_hdlr_init_##ops) \
938 rte_mempool_register_ops(&ops); \
/* Deprecated alias of RTE_MEMPOOL_REGISTER_OPS, kept for compatibility. */
#define MEMPOOL_REGISTER_OPS(ops) \
	RTE_DEPRECATED(MEMPOOL_REGISTER_OPS) RTE_MEMPOOL_REGISTER_OPS(ops)
951 void *opaque,
void *obj,
unsigned obj_idx);
1292 if (lcore_id >= RTE_MAX_LCORE)
1295 rte_mempool_trace_default_cache(mp, lcore_id,
1314 if (cache == NULL || cache->
len == 0)
1316 rte_mempool_trace_cache_flush(cache, mp);
1317 rte_mempool_ops_enqueue_bulk(mp, cache->
objs, cache->
len);
1334rte_mempool_do_generic_put(
struct rte_mempool *mp,
void *
const *obj_table,
1340 RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
1341 RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);
1344 if (
unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
1347 cache_objs = &cache->
objs[cache->
len];
1357 rte_memcpy(&cache_objs[0], obj_table,
sizeof(
void *) * n);
1362 rte_mempool_ops_enqueue_bulk(mp, &cache->
objs[cache->
size],
1372#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1373 if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
1374 rte_panic(
"cannot put objects in mempool\n");
1376 rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
1397 rte_mempool_trace_generic_put(mp, obj_table, n, cache);
1398 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
1399 rte_mempool_do_generic_put(mp, obj_table, n, cache);
1422 rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
1459rte_mempool_do_generic_get(
struct rte_mempool *mp,
void **obj_table,
1463 uint32_t index,
len;
1470 cache_objs = cache->
objs;
1473 if (cache->
len < n) {
1475 uint32_t req = n + (cache->
size - cache->
len);
1478 ret = rte_mempool_ops_dequeue_bulk(mp,
1479 &cache->
objs[cache->
len], req);
1494 for (index = 0,
len = cache->
len - 1; index < n; ++index,
len--, obj_table++)
1495 *obj_table = cache_objs[
len];
1499 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1500 RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
1507 ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
1510 RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1511 RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
1513 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1514 RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
1545 ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
1547 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
1548 rte_mempool_trace_generic_get(mp, obj_table, n, cache);
1579 rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
1632 void **first_obj_table,
unsigned int n)
1636 ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
1638 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
1639 RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
1640 RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
1643 RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
1644 RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
1647 rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
1821rte_mempool_get_page_size(
struct rte_mempool *mp,
size_t *pg_sz);
1843typedef void (rte_mempool_event_callback)(
1864rte_mempool_event_callback_register(rte_mempool_event_callback *func,
1882rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
#define __rte_cache_aligned
#define RTE_PTR_SUB(ptr, x)
#define RTE_PTR_ADD(ptr, x)
#define __rte_always_inline
static unsigned rte_lcore_id(void)
static void * rte_memcpy(void *dst, const void *src, size_t n)
void() rte_mempool_memchunk_free_cb_t(struct rte_mempool_memhdr *memhdr, void *opaque)
int rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name, void *pool_config)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
int rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs, void *vaddr, rte_iova_t iova, size_t len, rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
static __rte_always_inline struct rte_mempool_cache * rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, struct rte_mempool_objsz *sz)
void() rte_mempool_obj_cb_t(struct rte_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
struct rte_mempool * rte_mempool_lookup(const char *name)
int(* rte_mempool_enqueue_t)(struct rte_mempool *mp, void *const *obj_table, unsigned int n)
static struct rte_mempool * rte_mempool_from_obj(void *obj)
static rte_iova_t rte_mempool_virt2iova(const void *elt)
int(* rte_mempool_dequeue_t)(struct rte_mempool *mp, void **obj_table, unsigned int n)
void rte_mempool_free(struct rte_mempool *mp)
@ RTE_MEMPOOL_EVENT_DESTROY
@ RTE_MEMPOOL_EVENT_READY
unsigned(* rte_mempool_get_count)(const struct rte_mempool *mp)
void() rte_mempool_populate_obj_cb_t(struct rte_mempool *mp, void *opaque, void *vaddr, rte_iova_t iova)
int rte_mempool_populate_default(struct rte_mempool *mp)
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp)
static __rte_always_inline int rte_mempool_get_contig_blocks(struct rte_mempool *mp, void **first_obj_table, unsigned int n)
int(* rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp, void **first_obj_table, unsigned int n)
void(* rte_mempool_free_t)(struct rte_mempool *mp)
static __rte_always_inline void rte_mempool_cache_flush(struct rte_mempool_cache *cache, struct rte_mempool *mp)
static __rte_always_inline void rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table, unsigned int n)
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
int rte_mempool_register_ops(const struct rte_mempool_ops *ops)
int rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp, uint32_t obj_num, uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
int rte_mempool_populate_anon(struct rte_mempool *mp)
struct rte_mempool * rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, unsigned cache_size, unsigned private_data_size, int socket_id, unsigned flags)
void rte_mempool_cache_free(struct rte_mempool_cache *cache)
static __rte_always_inline void rte_mempool_generic_put(struct rte_mempool *mp, void *const *obj_table, unsigned int n, struct rte_mempool_cache *cache)
static int rte_mempool_full(const struct rte_mempool *mp)
int(* rte_mempool_alloc_t)(struct rte_mempool *mp)
void rte_mempool_dump(FILE *f, struct rte_mempool *mp)
static __rte_always_inline int rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned int n, struct rte_mempool_cache *cache)
struct rte_mempool_cache * rte_mempool_cache_create(uint32_t size, int socket_id)
void rte_mempool_audit(struct rte_mempool *mp)
void rte_mempool_walk(void(*func)(struct rte_mempool *, void *arg), void *arg)
#define RTE_MEMPOOL_OPS_NAMESIZE
void() rte_mempool_ctor_t(struct rte_mempool *, void *)
struct rte_mempool * rte_mempool_create(const char *name, unsigned n, unsigned elt_size, unsigned cache_size, unsigned private_data_size, rte_mempool_ctor_t *mp_init, void *mp_init_arg, rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags)
void() rte_mempool_mem_cb_t(struct rte_mempool *mp, void *opaque, struct rte_mempool_memhdr *memhdr, unsigned mem_idx)
ssize_t(* rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp, uint32_t obj_num, uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
int rte_mempool_ops_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
unsigned int rte_mempool_in_use_count(const struct rte_mempool *mp)
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp, rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
void rte_mempool_list_dump(FILE *f)
#define RTE_MEMPOOL_MAX_OPS_IDX
static int rte_mempool_empty(const struct rte_mempool *mp)
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp, rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
int(* rte_mempool_populate_t)(struct rte_mempool *mp, unsigned int max_objs, void *vaddr, rte_iova_t iova, size_t len, rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
static void * rte_mempool_get_priv(struct rte_mempool *mp)
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr)
int(* rte_mempool_get_info_t)(const struct rte_mempool *mp, struct rte_mempool_info *info)
void * objs[RTE_MEMPOOL_CACHE_MAX_SIZE *3]
unsigned int contig_block_size
RTE_STAILQ_ENTRY(rte_mempool_memhdr) next
rte_mempool_memchunk_free_cb_t * free_cb
RTE_STAILQ_ENTRY(rte_mempool_objhdr) next
struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX]
char name[RTE_MEMPOOL_OPS_NAMESIZE]
rte_mempool_alloc_t alloc
rte_mempool_dequeue_t dequeue
rte_mempool_get_info_t get_info
rte_mempool_calc_mem_size_t calc_mem_size
rte_mempool_get_count get_count
rte_mempool_populate_t populate
rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks
rte_mempool_enqueue_t enqueue
const struct rte_memzone * mz
struct rte_mempool_memhdr_list mem_list
char name[RTE_MEMPOOL_NAMESIZE]
unsigned private_data_size
struct rte_mempool_cache * local_cache
struct rte_mempool_objhdr_list elt_list