DPDK 23.11.0
rte_mempool.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 * Copyright(c) 2022 SmartShare Systems
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_compat.h>
#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#include "rte_mempool_trace_fp.h"

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /* Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /* Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /* Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_STATS
/* A structure that stores the mempool statistics (per-lcore). */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;             /* Number of puts. */
	uint64_t put_objs;             /* Number of objects successfully put. */
	uint64_t put_common_pool_bulk; /* Number of bulks enqueued in common pool. */
	uint64_t put_common_pool_objs; /* Number of objects enqueued in common pool. */
	uint64_t get_common_pool_bulk; /* Number of bulks dequeued from common pool. */
	uint64_t get_common_pool_objs; /* Number of objects dequeued from common pool. */
	uint64_t get_success_bulk;     /* Successful allocation number. */
	uint64_t get_success_objs;     /* Objects successfully allocated. */
	uint64_t get_fail_bulk;        /* Failed allocation number. */
	uint64_t get_fail_objs;        /* Objects that failed to be allocated. */
	uint64_t get_success_blks;     /* Successful allocation number of contiguous blocks. */
	uint64_t get_fail_blks;        /* Failed allocation number of contiguous blocks. */
	RTE_CACHE_GUARD;
} __rte_cache_aligned;
#endif

/* A structure that stores a per-core object cache. */
struct rte_mempool_cache {
	uint32_t size;        /* Size of the cache */
	uint32_t flushthresh; /* Threshold before we flush excess elements */
	uint32_t len;         /* Current cache count */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
	uint32_t unused;
	/*
	 * Alternative location for the most frequently updated mempool statistics
	 * (per-lcore), providing faster update access when using a mempool cache.
	 */
	struct {
		uint64_t put_bulk;         /* Number of puts. */
		uint64_t put_objs;         /* Number of objects successfully put. */
		uint64_t get_success_bulk; /* Successful allocation number. */
		uint64_t get_success_objs; /* Objects successfully allocated. */
	} stats;                           /* Statistics */
#endif
	/*
	 * The cache is allocated to this size to allow it to overflow in
	 * certain cases, avoiding needless emptying of the cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2] __rte_cache_aligned;
} __rte_cache_aligned;

/* A structure that stores the size of mempool elements. */
struct rte_mempool_objsz {
	uint32_t elt_size;     /* Size of an element. */
	uint32_t header_size;  /* Size of header (before elt). */
	uint32_t trailer_size; /* Size of trailer (after elt). */
	uint32_t total_size;   /* Total size of an object (header + elt + trailer). */
};

/* Maximum length of a mempool name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#ifndef RTE_MEMPOOL_ALIGN
/* Alignment of elements inside mempool. */
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)

/* Mempool object header: each object stored in a mempool is prefixed by
 * this structure, which allows retrieving the owning mempool from the
 * object and iterating over all objects attached to a mempool.
 */
struct rte_mempool_objhdr {
	RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /* list entry */
	struct rte_mempool *mp;                    /* owner mempool */
	rte_iova_t iova;                           /* IO address of the object */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                           /* debug cookie */
#endif
};

/* A list of object headers type */
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/* Mempool object trailer: in debug mode, each object is suffixed by this
 * structure containing a cookie that guards against memory corruption.
 */
struct rte_mempool_objtlr {
	uint64_t cookie; /* debug cookie */
};

#endif

/* A list of memory where objects are stored */
RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/* Callback used to free a memory chunk */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/* Mempool objects memory header: describes one of the memory chunks
 * (virtually and IOVA contiguous) in which the pool objects are stored.
 */
struct rte_mempool_memhdr {
	RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /* list entry */
	struct rte_mempool *mp;                    /* owner mempool */
	void *addr;                                /* virtual address of the chunk */
	rte_iova_t iova;                           /* IO address of the chunk */
	size_t len;                                /* length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb;   /* free callback */
	void *opaque;                              /* argument passed to the free callback */
};

/* Additional information about the mempool. */
struct rte_mempool_info {
	/* Number of objects in a contiguous block. */
	unsigned int contig_block_size;
};

/* The RTE mempool structure. */
struct rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /* Name of mempool. */
	union {
		void *pool_data;   /* Ring or pool to store objects. */
		uint64_t pool_id;  /* External mempool identifier. */
	};
	void *pool_config;                /* optional args for ops alloc. */
	const struct rte_memzone *mz;     /* Memzone where pool is alloc'd. */
	unsigned int flags;               /* Flags of the mempool. */
	int socket_id;                    /* Socket id passed at create. */
	uint32_t size;                    /* Max size of the mempool. */
	uint32_t cache_size;              /* Size of per-lcore default local cache. */

	uint32_t elt_size;                /* Size of an element. */
	uint32_t header_size;             /* Size of header (before elt). */
	uint32_t trailer_size;            /* Size of trailer (after elt). */

	unsigned private_data_size;       /* Size of private data. */
	/*
	 * Index into rte_mempool_ops_table array of mempool ops structs,
	 * which contain callback function pointers. An index is used here
	 * rather than pointers to the callbacks to facilitate any secondary
	 * processes that may want to use this mempool.
	 */
	int32_t ops_index;

	struct rte_mempool_cache *local_cache; /* Per-lcore local cache */

	uint32_t populated_size;          /* Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /* List of objects in pool */
	uint32_t nb_mem_chunks;           /* Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /* List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_STATS
	/* Per-lcore statistics; plus one entry for unregistered non-EAL threads. */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE + 1];
#endif
} __rte_cache_aligned;

/* Spreading among memory channels not required. */
#define RTE_MEMPOOL_F_NO_SPREAD		0x0001
/* Backward compatibility synonym, to be deprecated. */
#define MEMPOOL_F_NO_SPREAD		RTE_MEMPOOL_F_NO_SPREAD
/* Do not align objects on cache lines. */
#define RTE_MEMPOOL_F_NO_CACHE_ALIGN	0x0002
/* Backward compatibility synonym, to be deprecated. */
#define MEMPOOL_F_NO_CACHE_ALIGN	RTE_MEMPOOL_F_NO_CACHE_ALIGN
/* Default put is "single-producer". */
#define RTE_MEMPOOL_F_SP_PUT		0x0004
/* Backward compatibility synonym, to be deprecated. */
#define MEMPOOL_F_SP_PUT		RTE_MEMPOOL_F_SP_PUT
/* Default get is "single-consumer". */
#define RTE_MEMPOOL_F_SC_GET		0x0008
/* Backward compatibility synonym, to be deprecated. */
#define MEMPOOL_F_SC_GET		RTE_MEMPOOL_F_SC_GET
/* Internal: pool is created. */
#define RTE_MEMPOOL_F_POOL_CREATED	0x0010
/* Don't need IOVA contiguous objects. */
#define RTE_MEMPOOL_F_NO_IOVA_CONTIG	0x0020
/* Backward compatibility synonym, to be deprecated. */
#define MEMPOOL_F_NO_IOVA_CONTIG	RTE_MEMPOOL_F_NO_IOVA_CONTIG
/* Internal: no object from the pool can be used for device IO (DMA). */
#define RTE_MEMPOOL_F_NON_IO		0x0040

/* This macro lists all the mempool flags an application may request. */
#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
				      | RTE_MEMPOOL_F_NO_CACHE_ALIGN \
				      | RTE_MEMPOOL_F_SP_PUT \
				      | RTE_MEMPOOL_F_SC_GET \
				      | RTE_MEMPOOL_F_NO_IOVA_CONTIG \
				      )

/* @internal When stats are enabled, add n to the per-lcore mempool counter.
 * Unregistered non-EAL threads share the last entry of the stats array.
 */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {                          \
		unsigned int __lcore_id = rte_lcore_id();               \
		if (likely(__lcore_id < RTE_MAX_LCORE))                 \
			(mp)->stats[__lcore_id].name += (n);            \
		else                                                    \
			rte_atomic_fetch_add_explicit(&((mp)->stats[RTE_MAX_LCORE].name), \
				(n), rte_memory_order_relaxed);         \
	} while (0)
#else
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif

/* @internal When stats are enabled, add n to a mempool cache counter.
 * A cache is used by a single thread, so no synchronization is needed.
 */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) ((cache)->stats.name += (n))
#else
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) do {} while (0)
#endif

/* @internal Calculate the size of the mempool header, including the
 * per-lcore default caches when cs (the cache size) is non-zero.
 */
#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *
rte_mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/* Return a pointer to the mempool owning this object. */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

/* @internal Check and update object cookies, or panic. Does nothing unless
 * RTE_LIBRTE_MEMPOOL_DEBUG is enabled at build time.
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

/* @internal Check cookies of contiguous object blocks, or panic. */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
						free)
#else
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /* Max length of ops struct name. */

/* Prototype for implementation specific data provisioning function.
 *
 * The function should provide the implementation specific memory for
 * use by the other mempool ops functions in a given mempool ops struct.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/* Free the opaque private data stored in mp->pool_data. */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/* Enqueue objects into the external pool. */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

/* Dequeue objects from the external pool. */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/* Dequeue a number of contiguous object blocks from the external pool. */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n);

/* Return the number of available objects in the external pool. */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/* Calculate memory size required to store a given number of objects. */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/* @internal Helper to calculate the memory size required to store a given
 * number of objects.
 */
ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
		size_t *min_chunk_size, size_t *align);

/* Default way to calculate the memory size required to store a given
 * number of objects.
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/* Function to be called for each populated object. */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *vaddr, rte_iova_t iova);

/* Populate mempool objects using the provided memory chunk. */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/* Align objects on addresses multiple of total_elt_sz. */
#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001

/* @internal Helper to populate mempool objects using the provided memory
 * chunk: just slice objects one by one.
 */
int rte_mempool_op_populate_helper(struct rte_mempool *mp,
		unsigned int flags, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/* Default way to populate mempool objects using the provided memory chunk. */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/* Get some additional information about a mempool. */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
		struct rte_mempool_info *info);


/* Structure defining mempool operations structure */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /* Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;           /* Allocate private data. */
	rte_mempool_free_t free;             /* Free the external pool. */
	rte_mempool_enqueue_t enqueue;       /* Enqueue an object. */
	rte_mempool_dequeue_t dequeue;       /* Dequeue an object. */
	rte_mempool_get_count get_count;     /* Get qty of available objs. */
	/* Optional callback to calculate memory size required to store a
	 * given number of objects.
	 */
	rte_mempool_calc_mem_size_t calc_mem_size;
	/* Optional callback to populate mempool objects using a provided
	 * memory chunk.
	 */
	rte_mempool_populate_t populate;
	/* Get mempool info. */
	rte_mempool_get_info_t get_info;
	/* Dequeue a number of contiguous object blocks. */
	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16 /* Max registered ops structs */

/* Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 * Each process has its own storage for this ops struct array so that
 * the mempools can be shared across primary and secondary processes.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;  /* Spinlock for add/delete. */
	uint32_t num_ops;   /* Number of used ops structs in the table. */
	/* Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/* Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;

/* @internal Get the mempool ops struct from its index. */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}

/* @internal Wrapper for mempool_ops alloc callback. */
int
rte_mempool_ops_alloc(struct rte_mempool *mp);

/* @internal Wrapper for mempool_ops dequeue callback. */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->dequeue(mp, obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
	}
	return ret;
}

/* @internal Wrapper for mempool_ops dequeue_contig_blocks callback. */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}

/* @internal Wrapper for mempool_ops enqueue callback. */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->enqueue(mp, obj_table, n);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (unlikely(ret < 0))
		RTE_LOG(CRIT, MEMPOOL, "cannot enqueue %u objects to mempool %s\n",
			n, mp->name);
#endif
	return ret;
}

/* @internal Wrapper for mempool_ops get_count callback. */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/* @internal Wrapper for mempool_ops calc_mem_size callback. */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
				      uint32_t obj_num, uint32_t pg_shift,
				      size_t *min_chunk_size, size_t *align);

/* @internal Wrapper for mempool_ops populate callback. */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
			     void *vaddr, rte_iova_t iova, size_t len,
			     rte_mempool_populate_obj_cb_t *obj_cb,
			     void *obj_cb_arg);

/* Wrapper for mempool_ops get_info callback. */
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
			     struct rte_mempool_info *info);

/* @internal Wrapper for mempool_ops free callback. */
void
rte_mempool_ops_free(struct rte_mempool *mp);

/* Set the ops of a mempool. */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
		void *pool_config);

/* Register mempool operations. */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/* Macro to statically register the ops of a mempool handler.
 * Note that rte_mempool_register_ops() fails silently here when more than
 * RTE_MEMPOOL_MAX_OPS_IDX handlers are registered.
 */
#define RTE_MEMPOOL_REGISTER_OPS(ops)			\
	RTE_INIT(mp_hdlr_init_##ops)			\
	{						\
		rte_mempool_register_ops(&ops);		\
	}

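/*
 * Illustrative sketch (not part of the original header): how a driver
 * typically provides a mempool handler. The "my_*" callbacks are
 * hypothetical; only struct rte_mempool_ops and RTE_MEMPOOL_REGISTER_OPS()
 * come from this file.
 *
 *	static const struct rte_mempool_ops ops_my_handler = {
 *		.name = "my_handler",
 *		.alloc = my_alloc,       // rte_mempool_alloc_t
 *		.free = my_free,         // rte_mempool_free_t
 *		.enqueue = my_enqueue,   // rte_mempool_enqueue_t
 *		.dequeue = my_dequeue,   // rte_mempool_dequeue_t
 *		.get_count = my_get_count,
 *	};
 *	RTE_MEMPOOL_REGISTER_OPS(ops_my_handler);
 *
 * An application then selects this handler with
 * rte_mempool_set_ops_byname(mp, "my_handler", NULL) before the pool is
 * populated.
 */
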
/* An object callback function for the mempool. */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/* A memory callback function for the mempool. */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/* A mempool constructor callback function. */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/* Create a new mempool named name in memory.
 *
 * This function uses rte_memzone_reserve() to allocate memory; the pool
 * contains n elements of elt_size. The optimum size (in terms of memory
 * usage) for a mempool is when n is a power of two minus one: n = (2^q - 1).
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);

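/*
 * Usage sketch (illustrative, not from the original header): create a pool
 * of 2047 buffers of 2048 bytes each, with a 256-object per-lcore cache,
 * on any NUMA socket, then allocate and free one object.
 *
 *	struct rte_mempool *mp;
 *	void *obj;
 *
 *	mp = rte_mempool_create("test_pool", 2047, 2048, 256, 0,
 *				NULL, NULL, NULL, NULL,
 *				SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mempool\n");
 *	if (rte_mempool_get(mp, &obj) == 0)
 *		rte_mempool_put(mp, obj);
 */
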
/* Create an empty mempool.
 *
 * The mempool is allocated and initialized, but not populated: no memory is
 * allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool. Once populated,
 * the user may also want to initialize each object with
 * rte_mempool_obj_iter().
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
	unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);
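
/*
 * Sketch of the manual creation flow (illustrative; my_obj_init is a
 * hypothetical rte_mempool_obj_cb_t, and error handling is elided):
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("p", 2047, 64, 32, 0,
 *				      SOCKET_ID_ANY, 0);
 *	if (mp == NULL ||
 *	    rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
 *	    rte_mempool_populate_default(mp) < 0)
 *		... handle the error ...
 *	rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */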
/* Free a mempool. */
void
rte_mempool_free(struct rte_mempool *mp);

/* Add physically contiguous memory for objects in the pool at init. */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/* Add virtually contiguous memory for objects in the pool at init. */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/* Add memory from anonymous mapping for objects in the pool at init. */
int rte_mempool_populate_anon(struct rte_mempool *mp);

/* Add memory for objects in the pool at init (default allocation). */
int rte_mempool_populate_default(struct rte_mempool *mp);

/* Call a function for each mempool element. */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);

/* Call a function for each mempool memory chunk. */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);

/* Dump the status of the mempool to a file. */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);

/* Create a user-owned mempool cache. This can be used by unregistered
 * non-EAL threads to enable caching when they interact with a mempool.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/* Free a user-owned mempool cache. */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);

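/*
 * Illustrative use of a user-owned cache from an unregistered non-EAL
 * thread (sketch only; error handling omitted, and the cache must not be
 * shared between threads):
 *
 *	struct rte_mempool_cache *cache;
 *	void *objs[8];
 *
 *	cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
 *	if (rte_mempool_generic_get(mp, objs, 8, cache) == 0)
 *		rte_mempool_generic_put(mp, objs, 8, cache);
 *	rte_mempool_cache_flush(cache, mp);
 *	rte_mempool_cache_free(cache);
 */
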
/* Get a pointer to the per-lcore default mempool cache. Returns NULL if
 * the mempool has no cache or the lcore id is invalid.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	rte_mempool_trace_default_cache(mp, lcore_id,
		&mp->local_cache[lcore_id]);
	return &mp->local_cache[lcore_id];
}

/* Flush a user-owned mempool cache to the specified mempool. */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
		struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;
	rte_mempool_trace_cache_flush(cache, mp);
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}

/* @internal Put several objects back in the mempool; used internally. */
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* No cache provided */
	if (unlikely(cache == NULL))
		goto driver_enqueue;

	/* Increment stats now; adding to a mempool always succeeds. */
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);

	/* The request itself is too big for the cache */
	if (unlikely(n > cache->flushthresh))
		goto driver_enqueue_stats_incremented;

	/*
	 * The cache follows the following algorithm:
	 *   1. If the objects cannot be added to the cache without crossing
	 *      the flush threshold, flush the cache to the backend.
	 *   2. Add the objects to the cache.
	 */

	if (cache->len + n <= cache->flushthresh) {
		cache_objs = &cache->objs[cache->len];
		cache->len += n;
	} else {
		cache_objs = &cache->objs[0];
		rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
		cache->len = n;
	}

	/* Add the objects to the cache. */
	rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);

	return;

driver_enqueue:

	/* Increment stats now; adding to a mempool always succeeds. */
	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);

driver_enqueue_stats_incremented:

	/* Push the objects to the backend. */
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
}

/* Put several objects back in the mempool. */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
	rte_mempool_do_generic_put(mp, obj_table, n, cache);
}

/* Put several objects back in the mempool, using the default per-lcore cache. */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
	rte_mempool_generic_put(mp, obj_table, n, cache);
}

/* Put one object back in the mempool. */
static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}

/* @internal Get several objects from the mempool; used internally.
 * Returns 0 on success, <0 on error (in which case no object is retrieved).
 */
static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	unsigned int remaining;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided */
	if (unlikely(cache == NULL)) {
		remaining = n;
		goto driver_dequeue;
	}

	/* The cache is a stack, so copy will be in reverse order. */
	cache_objs = &cache->objs[cache->len];

	if (__extension__(__builtin_constant_p(n)) && n <= cache->len) {
		/*
		 * The request size is known at build time, and
		 * the entire request can be satisfied from the cache,
		 * so let the compiler unroll the fixed length copy loop.
		 */
		cache->len -= n;
		for (index = 0; index < n; index++)
			*obj_table++ = *--cache_objs;

		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

		return 0;
	}

	/*
	 * Use the cache as much as we have to return hot objects first.
	 * If the request size 'n' is known at build time, the above comparison
	 * ensures that n > cache->len here, so omit RTE_MIN().
	 */
	len = __extension__(__builtin_constant_p(n)) ? cache->len :
			RTE_MIN(n, cache->len);
	cache->len -= len;
	remaining = n - len;
	for (index = 0; index < len; index++)
		*obj_table++ = *--cache_objs;

	/*
	 * If the request size 'n' is known at build time, the case
	 * where the entire request can be satisfied from the cache
	 * has already been handled above, so omit handling it here.
	 */
	if (!__extension__(__builtin_constant_p(n)) && remaining == 0) {
		/* The entire request is satisfied from the cache. */

		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

		return 0;
	}

	/* if dequeue below would overflow mem allocated for cache */
	if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto driver_dequeue;

	/* Fill the cache from the backend; fetch size + remaining objects. */
	ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
			cache->size + remaining);
	if (unlikely(ret < 0)) {
		/*
		 * We are buffer constrained, and not able to allocate
		 * cache + remaining.
		 * Do not fill the cache, just satisfy the remaining part of
		 * the request directly from the backend.
		 */
		goto driver_dequeue;
	}

	/* Satisfy the remaining part of the request from the filled cache. */
	cache_objs = &cache->objs[cache->size + remaining];
	for (index = 0; index < remaining; index++)
		*obj_table++ = *--cache_objs;

	cache->len = cache->size;

	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

	return 0;

driver_dequeue:

	/* Get remaining objects directly from the backend. */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);

	if (ret < 0) {
		if (likely(cache != NULL)) {
			cache->len = n - remaining;
			/*
			 * No further action is required to roll the first part
			 * of the request back into the cache, as objects in
			 * the cache are intact.
			 */
		}

		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
	} else {
		if (likely(cache != NULL)) {
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
		} else {
			RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
			RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
		}
	}

	return ret;
}

/* Get several objects from the mempool.
 *
 * If cache is NULL, the objects are dequeued directly from the backend;
 * otherwise the cache is consulted first. Returns 0 on success, <0 on
 * error, in which case no object is retrieved.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
	return ret;
}

/* Get several objects from the mempool, using the default per-lcore cache. */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}

/* Get one object from the mempool. */
static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}

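/*
 * Illustrative bulk round trip (sketch; assumes mp is a pool created
 * earlier with rte_mempool_create()):
 *
 *	void *objs[32];
 *
 *	if (rte_mempool_get_bulk(mp, objs, 32) == 0) {
 *		... use the 32 objects ...
 *		rte_mempool_put_bulk(mp, objs, 32);
 *	}
 */
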
/* Get a contiguous block of objects from the mempool, if the pool's
 * underlying ops support it. Returns 0 on success, <0 on error.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
							1);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
	}

	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
	return ret;
}

/* Return the number of entries available in the mempool. */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);

/* Return the number of elements that have been allocated from the mempool. */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);

/* Test if the mempool is full. */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == mp->size;
}

/* Test if the mempool is empty. */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == 0;
}

/* Return the IO address of elt, which is an element of the pool mp. */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->iova;
}

/* Check the consistency of the mempool; panic on error. */
void rte_mempool_audit(struct rte_mempool *mp);

/* Return a pointer to the private data in a mempool structure. */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

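/*
 * Illustrative use of pool private data (sketch; struct my_priv is
 * hypothetical):
 *
 *	struct my_priv { uint32_t seed; };
 *	struct rte_mempool *mp;
 *	struct my_priv *priv;
 *
 *	mp = rte_mempool_create("p", 2047, 64, 0, sizeof(struct my_priv),
 *				NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
 *	priv = rte_mempool_get_priv(mp);
 *	priv->seed = 42;
 */
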
/* Search a mempool by its name; return NULL if not found. */
struct rte_mempool *rte_mempool_lookup(const char *name);

/* Dump the status of all mempools to a file. */
void rte_mempool_list_dump(FILE *f);

/* Get the header, trailer and total size of a mempool element. */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

/* Walk the list of all mempools, calling func for each of them. */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);

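/*
 * Example walk callback (illustrative; dump_one is hypothetical):
 *
 *	static void
 *	dump_one(struct rte_mempool *mp, void *arg)
 *	{
 *		rte_mempool_dump((FILE *)arg, mp);
 *	}
 *	...
 *	rte_mempool_walk(dump_one, stdout);
 */
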
/* @internal Get the page size used for mempool object allocation. */
int
rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);

/* Mempool events. */
enum rte_mempool_event {
	/* Occurs after a mempool is fully populated. */
	RTE_MEMPOOL_EVENT_READY = 0,
	/* Occurs before the destruction of a mempool begins. */
	RTE_MEMPOOL_EVENT_DESTROY = 1,
};

/* Mempool event callback. */
typedef void (rte_mempool_event_callback)(
		enum rte_mempool_event event,
		struct rte_mempool *mp,
		void *user_data);

/* @internal Register a callback invoked on mempool life cycle events.
 * Callbacks will be invoked in the process that creates the mempool.
 */
__rte_internal
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
				    void *user_data);

/* @internal Unregister a callback added with
 * rte_mempool_event_callback_register().
 */
__rte_internal
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
				      void *user_data);

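/*
 * Illustrative event callback (sketch; on_mempool_event is hypothetical,
 * and the register/unregister API above is internal to DPDK libraries):
 *
 *	static void
 *	on_mempool_event(enum rte_mempool_event event, struct rte_mempool *mp,
 *			 void *user_data)
 *	{
 *		if (event == RTE_MEMPOOL_EVENT_READY)
 *			printf("mempool %s is ready\n", mp->name);
 *	}
 */
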
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */