DPDK 22.11.0-rc2
rte_mempool.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#include "rte_mempool_trace_fp.h"

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;             /**< Number of puts. */
	uint64_t put_objs;             /**< Number of objects successfully put. */
	uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
	uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
	uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
	uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
	uint64_t get_success_bulk;     /**< Successful allocation number. */
	uint64_t get_success_objs;     /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;        /**< Failed allocation number. */
	uint64_t get_fail_objs;        /**< Objects that failed to be allocated. */
	uint64_t get_success_blks;     /**< Successful allocation number of contiguous blocks. */
	uint64_t get_fail_blks;        /**< Failed allocation number of contiguous blocks. */
} __rte_cache_aligned;
#endif

/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	uint32_t size;	      /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;	      /**< Current cache count */
	/**
	 * Cache objects
	 *
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2] __rte_cache_aligned;
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};
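
/*
 * Illustrative note (not part of the original header): for a layout filled
 * in by rte_mempool_calc_obj_size(), the fields above satisfy
 *
 *	total_size == header_size + elt_size + trailer_size
 *
 * where header_size covers at least struct rte_mempool_objhdr (padded so
 * that objects start on an RTE_MEMPOOL_ALIGN boundary unless
 * RTE_MEMPOOL_F_NO_CACHE_ALIGN is set), and trailer_size is typically 0
 * unless debug cookies or channel-spreading padding enlarge it.
 */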

#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#ifndef RTE_MEMPOOL_ALIGN
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header structure.
 *
 * Each object stored in mempools is prefixed by this header structure.
 * It allows retrieving the mempool pointer from the object and iterating
 * on all objects attached to a mempool. When debug is enabled, a cookie
 * is also added in this structure, preventing corruptions and double-frees.
 */
struct rte_mempool_objhdr {
	RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	rte_iova_t iova;                 /**< IO address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type
 */
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure (if debug is enabled)
 */
struct rte_mempool_objtlr {
	uint64_t cookie;                 /**< Debug cookie. */
};

#endif

/**
 * A list of memory where objects are stored
 */
RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored are registered in the mempool
 * via this structure.
 */
struct rte_mempool_memhdr {
	RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the chunk */
	void *addr;              /**< Virtual address of the chunk */
	rte_iova_t iova;         /**< IO address of the chunk */
	size_t len;              /**< length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
	void *opaque;            /**< Argument passed to the free callback */
};

/**
 * Additional information about the mempool
 *
 * The structure is cache-line aligned to avoid ABI breakages in
 * a number of cases when something small is added.
 */
struct rte_mempool_info {
	/** Number of objects in the contiguous block */
	unsigned int contig_block_size;
} __rte_cache_aligned;

/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	RTE_STD_C11
	union {
		void *pool_data;         /**< Ring or pool to store objects. */
		uint64_t pool_id;        /**< External mempool identifier. */
	};
	void *pool_config;               /**< optional args for ops alloc. */
	const struct rte_memzone *mz;    /**< Memzone where pool is alloc'd. */
	unsigned int flags;              /**< Flags of the mempool. */
	int socket_id;                   /**< Socket id passed at create. */
	uint32_t size;                   /**< Max size of the mempool. */
	uint32_t cache_size;
	/**< Size of per-lcore default local cache. */

	uint32_t elt_size;               /**< Size of an element. */
	uint32_t header_size;            /**< Size of header (before elt). */
	uint32_t trailer_size;           /**< Size of trailer (after elt). */

	unsigned private_data_size;      /**< Size of private data. */
	/**
	 * Index into rte_mempool_ops_table array of mempool ops
	 * structs, which contain callback function pointers.
	 * We're using an index here rather than pointers to the callbacks
	 * to facilitate any secondary processes that may want to use
	 * this mempool.
	 */
	int32_t ops_index;

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	uint32_t populated_size;         /**< Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
	uint32_t nb_mem_chunks;          /**< Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	/** Per-lcore statistics. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
} __rte_cache_aligned;

/** Spreading among memory channels not required. */
#define RTE_MEMPOOL_F_NO_SPREAD		0x0001
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD.
 * To be deprecated.
 */
#define MEMPOOL_F_NO_SPREAD		RTE_MEMPOOL_F_NO_SPREAD
/** Do not align objects on cache lines. */
#define RTE_MEMPOOL_F_NO_CACHE_ALIGN	0x0002
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN.
 * To be deprecated.
 */
#define MEMPOOL_F_NO_CACHE_ALIGN	RTE_MEMPOOL_F_NO_CACHE_ALIGN
/** Default put is "single-producer". */
#define RTE_MEMPOOL_F_SP_PUT		0x0004
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT.
 * To be deprecated.
 */
#define MEMPOOL_F_SP_PUT		RTE_MEMPOOL_F_SP_PUT
/** Default get is "single-consumer". */
#define RTE_MEMPOOL_F_SC_GET		0x0008
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET.
 * To be deprecated.
 */
#define MEMPOOL_F_SC_GET		RTE_MEMPOOL_F_SC_GET
/** Internal: pool is created. */
#define RTE_MEMPOOL_F_POOL_CREATED	0x0010
/** Don't need IOVA contiguous objects. */
#define RTE_MEMPOOL_F_NO_IOVA_CONTIG	0x0020
/**
 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG.
 * To be deprecated.
 */
#define MEMPOOL_F_NO_IOVA_CONTIG	RTE_MEMPOOL_F_NO_IOVA_CONTIG
/** Internal: no object from the pool can be used for device IO (DMA). */
#define RTE_MEMPOOL_F_NON_IO		0x0040

/**
 * This macro is used to represent all valid user flags.
 */
#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
	| RTE_MEMPOOL_F_NO_CACHE_ALIGN \
	| RTE_MEMPOOL_F_SP_PUT \
	| RTE_MEMPOOL_F_SC_GET \
	| RTE_MEMPOOL_F_NO_IOVA_CONTIG \
	)

/**
 * @internal When debug is enabled, store some statistics.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param name
 *   Name of the statistics field to increment in the memory pool.
 * @param n
 *   Number to add to the statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();		\
		if (__lcore_id < RTE_MAX_LCORE) {		\
			mp->stats[__lcore_id].name += n;	\
		}						\
	} while (0)
#else
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif

/**
 * Size of the mempool header, including the per-lcore caches when a
 * non-zero cache size is used.
 */
#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *
rte_mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 *
 * @param obj
 *   An object that is owned by a pool. If this is not the case,
 *   the behavior is undefined.
 * @return
 *   A pointer to the mempool structure.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

/**
 * @internal Check and update cookies, or panic.
 *
 * @param mp
 *   Pointer to the memory pool.
 * @param obj_table_const
 *   Pointer to a table of void * pointers (objects).
 * @param n
 *   Number of objects in the table.
 * @param free
 *   - 0: object is supposed to be allocated, mark it as free
 *   - 1: object is supposed to be free, mark it as allocated
 *   - 2: just check that cookie is valid (free or allocated)
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

/**
 * @internal Check contiguous object blocks and update cookies, or panic.
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
						free)
#else
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/**
 * Prototype for implementation specific data provisioning function.
 *
 * The function should provide the implementation specific memory for
 * use by the other mempool ops functions in a given mempool ops struct.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/**
 * Dequeue a number of contiguous object blocks from the external pool.
 */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/**
 * Calculate memory size required to store given number of objects.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * @internal Helper to calculate memory size required to store given
 * number of objects.
 */
ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
		size_t *min_chunk_size, size_t *align);

/**
 * Default way to calculate memory size required to store given number of
 * objects.
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * Function to be called for each populated object.
 */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *vaddr, rte_iova_t iova);

/**
 * Function to populate mempool objects using provided memory chunk.
 */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Align objects on addresses multiple of total_elt_sz.
 */
#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001

/**
 * @internal Helper to populate mempool object using provided memory chunk.
 */
int rte_mempool_op_populate_helper(struct rte_mempool *mp,
		unsigned int flags, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Default way to populate mempool object using provided memory chunk.
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Get some additional information about a mempool.
 */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
		struct rte_mempool_info *info);

/** Structure defining mempool operations structure */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
	/**
	 * Optional callback to calculate memory size required to
	 * store specified number of objects.
	 */
	rte_mempool_calc_mem_size_t calc_mem_size;
	/**
	 * Optional callback to populate mempool objects using
	 * provided memory chunk.
	 */
	rte_mempool_populate_t populate;
	/**
	 * Get mempool info
	 */
	rte_mempool_get_info_t get_info;
	/**
	 * Dequeue a number of contiguous object blocks.
	 */
	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */

/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;     /**< Spinlock for add/delete. */
	uint32_t num_ops;      /**< Number of used ops structs in the table. */
	/** Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;

/**
 * @internal Get the mempool ops struct from its index.
 *
 * @param ops_index
 *   The index of the ops struct in the ops struct table. It must be a valid
 *   index: (0 <= idx < num_ops).
 * @return
 *   The pointer to the ops struct in the table.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}

/**
 * @internal Wrapper for mempool_ops alloc callback.
 *
 * @return
 *   - 0: Success; successfully allocated mempool pool_data.
 *   - <0: Error; code of alloc function.
 */
int
rte_mempool_ops_alloc(struct rte_mempool *mp);

/**
 * @internal Wrapper for mempool_ops dequeue callback.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->dequeue(mp, obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
	}
	return ret;
}

/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}

/**
 * @internal Wrapper for mempool_ops enqueue callback.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->enqueue(mp, obj_table, n);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (unlikely(ret < 0))
		RTE_LOG(CRIT, MEMPOOL, "cannot enqueue %u objects to mempool %s\n",
			n, mp->name);
#endif
	return ret;
}

/**
 * @internal Wrapper for mempool_ops get_count callback.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/**
 * @internal Wrapper for mempool_ops calc_mem_size callback.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * @internal Wrapper for mempool_ops populate callback.
 */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb,
		void *obj_cb_arg);

/**
 * Wrapper for mempool_ops get_info callback.
 */
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
		struct rte_mempool_info *info);

/**
 * @internal Wrapper for mempool_ops free callback.
 */
void
rte_mempool_ops_free(struct rte_mempool *mp);

/**
 * Set the ops of a mempool.
 *
 * This can only be done on a mempool that is not populated, i.e. just after
 * a call to rte_mempool_create_empty().
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
		void *pool_config);

/**
 * @internal Register mempool operations.
 *
 * @return
 *   - >=0: Success; return the index of the ops struct in the table.
 *   - <0: Error; the registration failed.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 * Note that rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX ops structs are registered.
 */
#define RTE_MEMPOOL_REGISTER_OPS(ops)			\
	RTE_INIT(mp_hdlr_init_##ops)			\
	{						\
		rte_mempool_register_ops(&ops);		\
	}
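
/*
 * Illustrative sketch (not part of the original header): registering a
 * hypothetical mempool driver. All "my_*" names below are placeholders
 * for callbacks a driver would implement.
 *
 *	static const struct rte_mempool_ops my_ops = {
 *		.name = "my_driver",
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.enqueue = my_enqueue,
 *		.dequeue = my_dequeue,
 *		.get_count = my_get_count,
 *	};
 *	RTE_MEMPOOL_REGISTER_OPS(my_ops);
 *
 * An application would then select these ops on a not-yet-populated pool:
 *
 *	rte_mempool_set_ops_byname(mp, "my_driver", NULL);
 */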

/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Used by rte_mempool_create() and rte_mempool_create_empty().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses ``rte_memzone_reserve()`` to allocate memory. The
 * pool contains n elements of elt_size. Its size is set to n.
 *
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error.
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);
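
/*
 * Illustrative sketch (not part of the original header): creating a pool of
 * 2 KB buffers with a 256-object per-lcore cache. The name and sizes are
 * arbitrary; n = 8191 (2^13 - 1) follows the documented advice that a
 * power-of-two-minus-one count makes the best use of a ring-based backend.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create("example_pool", 8191, 2048, 256, 0,
 *				NULL, NULL, NULL, NULL,
 *				rte_socket_id(), 0);
 *	if (mp == NULL)
 *		rte_panic("cannot create mempool\n");
 */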

/**
 * Create an empty mempool
 *
 * The mempool is allocated and initialized, but it is not populated: no
 * memory is allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool. Once
 * populated, the user may also want to initialize each object with
 * rte_mempool_obj_iter().
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
	unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);

/**
 * Free a mempool
 *
 * Unlink the mempool from the global list, free the memory chunks, and all
 * memory referenced by the mempool. The objects must not be used by
 * other cores as they will be freed.
 */
void
rte_mempool_free(struct rte_mempool *mp);

/**
 * Add physically contiguous memory for objects in the pool at init
 *
 * @return
 *   The number of objects added on success (strictly positive).
 *   On error, the chunk is not added in the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add virtually contiguous memory for objects in the pool at init
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add memory from anonymous mapping for objects in the pool at init
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);

/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool.
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

/**
 * Call a function for each mempool element
 *
 * @return
 *   Number of objects iterated.
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Call a function for each mempool memory chunk
 *
 * @return
 *   Number of memory chunks iterated.
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
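
/*
 * Illustrative sketch (not part of the original header): using
 * rte_mempool_obj_iter() to run a per-object initializer after
 * rte_mempool_create_empty() + populate. my_obj_init is a placeholder.
 *
 *	static void
 *	my_obj_init(struct rte_mempool *mp, void *arg, void *obj,
 *		    unsigned obj_idx)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *	}
 *
 *	uint32_t n = rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */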

/**
 * Dump the status of the mempool to a file.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);

/**
 * Create a user-owned mempool cache.
 *
 * This can be used by unregistered non-EAL threads to enable caching when
 * they interact with a mempool.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);

/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @return
 *   A pointer to the mempool cache, or NULL if the cache is disabled or
 *   the calling thread is an unregistered non-EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	rte_mempool_trace_default_cache(mp, lcore_id,
		&mp->local_cache[lcore_id]);
	return &mp->local_cache[lcore_id];
}

/**
 * Flush a user-owned mempool cache to the specified mempool.
 *
 * @param cache
 *   A pointer to the mempool cache, or NULL to flush the calling lcore's
 *   default cache.
 * @param mp
 *   A pointer to the mempool.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
		struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;
	rte_mempool_trace_cache_flush(cache, mp);
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}

/**
 * @internal Put several objects back in the mempool; used internally.
 */
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* Increment stats now; adding to the mempool always succeeds. */
	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);

	/* No cache provided or the request itself is too big for the cache */
	if (unlikely(cache == NULL || n > cache->flushthresh))
		goto driver_enqueue;

	/*
	 * The cache follows the following algorithm:
	 * 1. If the objects cannot be added to the cache without crossing
	 *    the flush threshold, flush the cache to the backend.
	 * 2. Add the objects to the cache.
	 */

	if (cache->len + n <= cache->flushthresh) {
		cache_objs = &cache->objs[cache->len];
		cache->len += n;
	} else {
		cache_objs = &cache->objs[0];
		rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
		cache->len = n;
	}

	/* Add the objects to the cache. */
	rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);

	return;

driver_enqueue:

	/* push objects to the backend */
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
}

static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
	rte_mempool_do_generic_put(mp, obj_table, n, cache);
}

/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
	rte_mempool_generic_put(mp, obj_table, n, cache);
}

static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}

static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	unsigned int remaining = n;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided */
	if (unlikely(cache == NULL))
		goto driver_dequeue;

	/* Use the cache as much as we have to return hot objects first */
	len = RTE_MIN(remaining, cache->len);
	cache_objs = &cache->objs[cache->len];
	cache->len -= len;
	remaining -= len;
	for (index = 0; index < len; index++)
		*obj_table++ = *--cache_objs;

	if (remaining == 0) {
		/* The entire request is satisfied from the cache. */

		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);

		return 0;
	}

	/* if dequeue below would overflow mem allocated for cache */
	if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto driver_dequeue;

	/* Fill the cache from the backend; fetch size + remaining objects. */
	ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
			cache->size + remaining);
	if (unlikely(ret < 0)) {
		/*
		 * We are buffer constrained, and not able to allocate
		 * cache + remaining.
		 * Do not fill the cache, just satisfy the remaining part of
		 * the request directly from the backend.
		 */
		goto driver_dequeue;
	}

	/* Satisfy the remaining part of the request from the filled cache. */
	cache_objs = &cache->objs[cache->size + remaining];
	for (index = 0; index < remaining; index++)
		*obj_table++ = *--cache_objs;

	cache->len = cache->size;

	RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);

	return 0;

driver_dequeue:

	/* Get remaining objects directly from the backend. */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);

	if (ret < 0) {
		if (likely(cache != NULL)) {
			cache->len = n - remaining;
			/*
			 * No further action is required to roll the first part
			 * of the request back into the cache, as objects in
			 * the cache are intact.
			 */
		}

		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
	}

	return ret;
}

static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
	return ret;
}

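/*
 * Illustrative sketch (not part of the original header): pairing the generic
 * get/put functions with a user-owned cache, e.g. from an unregistered
 * non-EAL thread that has no default per-lcore cache. Sizes are arbitrary.
 *
 *	struct rte_mempool_cache *c;
 *	void *objs[16];
 *
 *	c = rte_mempool_cache_create(32, SOCKET_ID_ANY);
 *	if (c == NULL)
 *		return;
 *	if (rte_mempool_generic_get(mp, objs, 16, c) == 0) {
 *		... use the 16 objects ...
 *		rte_mempool_generic_put(mp, objs, 16, c);
 *	}
 *	rte_mempool_cache_flush(c, mp);
 *	rte_mempool_cache_free(c);
 */
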
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}

static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}

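/*
 * Illustrative sketch (not part of the original header): a single-object
 * round trip. rte_mempool_get() returns 0 on success and -ENOENT when the
 * pool is exhausted, in which case no object is retrieved.
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) < 0)
 *		return -1;	// pool empty; nothing to release
 *	... use obj ...
 *	rte_mempool_put(mp, obj);
 */
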
/**
 * Get a contiguous block of objects from the mempool.
 *
 * If cache is enabled, consider flushing it first, to reuse objects
 * as soon as possible.
 *
 * @return
 *   - 0: Success.
 *   - <0: Error; the mempool driver may not support block dequeue, or
 *     there are not enough entries, in which case no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
			      void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
							1);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
	}

	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
	return ret;
}

/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);

/**
 * Return the number of elements which have been allocated from the mempool.
 */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);

/**
 * Test if the mempool is full. Should only be used for debug purposes
 * (see rte_mempool_avail_count() above).
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == mp->size;
}

/**
 * Test if the mempool is empty. Should only be used for debug purposes.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == 0;
}

/**
 * Return the IO address of elt, which is an element of the pool mp.
 *
 * If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the
 * returned value is RTE_BAD_IOVA.
 */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->iova;
}

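/*
 * Illustrative sketch (not part of the original header): translating an
 * object to its bus address for DMA, assuming the pool was not created
 * with RTE_MEMPOOL_F_NO_IOVA_CONTIG:
 *
 *	void *obj;
 *
 *	if (rte_mempool_get(mp, &obj) == 0) {
 *		rte_iova_t io = rte_mempool_virt2iova(obj);
 *		... program io into a device descriptor ...
 *		rte_mempool_put(mp, obj);
 *	}
 */
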
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check that
 * the cookies of mempool objects (even the ones that are not present in
 * the pool) have a correct value. If not, a panic will occur.
 */
void rte_mempool_audit(struct rte_mempool *mp);

static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

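/*
 * Illustrative sketch (not part of the original header): the private data
 * area sized by private_data_size at creation time can carry per-pool user
 * context. struct my_pool_ctx is a placeholder type.
 *
 *	struct my_pool_ctx {
 *		uint64_t seed;
 *	};
 *
 *	struct rte_mempool *mp;
 *	struct my_pool_ctx *ctx;
 *
 *	mp = rte_mempool_create("ctx_pool", 1023, 64, 0,
 *				sizeof(struct my_pool_ctx),
 *				NULL, NULL, NULL, NULL,
 *				rte_socket_id(), 0);
 *	ctx = rte_mempool_get_priv(mp);
 *	ctx->seed = 42;
 */
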
/**
 * Dump the status of all mempools on the console.
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);

/**
 * Given a desired size of the mempool element and mempool flags,
 * calculate the memory size required for the mempool element and
 * the related object header/trailer sizes.
 *
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

/**
 * Walk the list of all memory pools.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);

/**
 * @internal Get page size used for mempool object allocation.
 */
int
rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);

/**
 * Mempool event type.
 * @internal
 */
enum rte_mempool_event {
	/** Occurs after a mempool is fully populated. */
	RTE_MEMPOOL_EVENT_READY = 0,
	/** Occurs before the destruction of a mempool begins. */
	RTE_MEMPOOL_EVENT_DESTROY = 1,
};

/**
 * @internal
 * Mempool event callback.
 */
typedef void (rte_mempool_event_callback)(
		enum rte_mempool_event event,
		struct rte_mempool *mp,
		void *user_data);

/**
 * @internal
 * Register a callback invoked on mempool life cycle events.
 */
__rte_internal
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
				    void *user_data);

/**
 * @internal
 * Unregister a callback added with rte_mempool_event_callback_register().
 */
__rte_internal
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
				      void *user_data);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */