From 0f2620ffc41d117cc28bc053efe2dc837cf748dd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 13 Nov 2025 09:39:42 +0100 Subject: fault-inject: make enum fault_flags available unconditionally This will allow using should_fail_ex from code without having to make it conditional on CONFIG_FAULT_INJECTION. Signed-off-by: Christoph Hellwig Link: https://patch.msgid.link/20251113084022.1255121-2-hch@lst.de Signed-off-by: Vlastimil Babka --- include/linux/fault-inject.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 8c829d28dcf3..58fd14c82270 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -8,6 +8,10 @@ struct dentry; struct kmem_cache; +enum fault_flags { + FAULT_NOWARN = 1 << 0, +}; + #ifdef CONFIG_FAULT_INJECTION #include @@ -36,10 +40,6 @@ struct fault_attr { struct dentry *dname; }; -enum fault_flags { - FAULT_NOWARN = 1 << 0, -}; - #define FAULT_ATTR_INITIALIZER { \ .interval = 1, \ .times = ATOMIC_INIT(1), \ -- cgit v1.2.3 From ac529d86ad26d632d3c70b7c5b839282a3294d2f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 13 Nov 2025 09:39:48 +0100 Subject: mempool: add mempool_{alloc,free}_bulk Add a version of the mempool allocator that works for batch allocations of multiple objects. Calling mempool_alloc in a loop is not safe because it could deadlock if multiple threads are performing such an allocation at the same time. As an extra benefit, the interface is built so that the same array can be used for alloc_pages_bulk / release_pages so that at least for page-backed mempools the fast path can use a nice batch optimization. Note that mempool_alloc_bulk does not take a gfp_mask argument as it must always be able to sleep and doesn't support any non-trivial modifiers. NOFS or NOIO constraints must be set through the scoped API. Signed-off-by: Christoph Hellwig Link: https://patch.msgid.link/20251113084022.1255121-8-hch@lst.de Signed-off-by: Vlastimil Babka --- include/linux/mempool.h | 6 ++ mm/mempool.c | 177 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 141 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 34941a4b9026..e914fec0e119 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -66,9 +66,15 @@ extern void mempool_destroy(mempool_t *pool); extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc; #define mempool_alloc(...) \ alloc_hooks(mempool_alloc_noprof(__VA_ARGS__)) +int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem, + unsigned int count, unsigned int allocated); +#define mempool_alloc_bulk(...)
\ + alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__)) extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc; extern void mempool_free(void *element, mempool_t *pool); +unsigned int mempool_free_bulk(struct mempool *pool, void **elem, + unsigned int count); /* * A mempool_alloc_t and mempool_free_t that get the memory from diff --git a/mm/mempool.c b/mm/mempool.c index 6bcc319d547d..b45bcf415147 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -21,11 +21,21 @@ #include "slab.h" static DECLARE_FAULT_ATTR(fail_mempool_alloc); +static DECLARE_FAULT_ATTR(fail_mempool_alloc_bulk); static int __init mempool_faul_inject_init(void) { - return PTR_ERR_OR_ZERO(fault_create_debugfs_attr("fail_mempool_alloc", + int error; + + error = PTR_ERR_OR_ZERO(fault_create_debugfs_attr("fail_mempool_alloc", NULL, &fail_mempool_alloc)); + if (error) + return error; + + /* booting will fail on error return here, don't bother to cleanup */ + return PTR_ERR_OR_ZERO( + fault_create_debugfs_attr("fail_mempool_alloc_bulk", NULL, + &fail_mempool_alloc_bulk)); } late_initcall(mempool_faul_inject_init); @@ -380,15 +390,22 @@ out: } EXPORT_SYMBOL(mempool_resize); -static void *mempool_alloc_from_pool(struct mempool *pool, gfp_t gfp_mask) +static unsigned int mempool_alloc_from_pool(struct mempool *pool, void **elems, + unsigned int count, unsigned int allocated, + gfp_t gfp_mask) { unsigned long flags; - void *element; + unsigned int i; spin_lock_irqsave(&pool->lock, flags); - if (unlikely(!pool->curr_nr)) + if (unlikely(pool->curr_nr < count - allocated)) goto fail; - element = remove_element(pool); + for (i = 0; i < count; i++) { + if (!elems[i]) { + elems[i] = remove_element(pool); + allocated++; + } + } spin_unlock_irqrestore(&pool->lock, flags); /* Paired with rmb in mempool_free(), read comment there. */ @@ -398,8 +415,9 @@ static void *mempool_alloc_from_pool(struct mempool *pool, gfp_t gfp_mask) * Update the allocation stack trace as this is more useful for * debugging. */ - kmemleak_update_trace(element); - return element; + for (i = 0; i < count; i++) + kmemleak_update_trace(elems[i]); + return allocated; fail: if (gfp_mask & __GFP_DIRECT_RECLAIM) { @@ -421,7 +439,7 @@ fail: spin_unlock_irqrestore(&pool->lock, flags); } - return NULL; + return allocated; } /* @@ -437,6 +455,65 @@ static inline gfp_t mempool_adjust_gfp(gfp_t *gfp_mask) return *gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO); } +/** + * mempool_alloc_bulk - allocate multiple elements from a memory pool + * @pool: pointer to the memory pool + * @elems: partially or fully populated elements array + * @count: number of entries in @elem that need to be allocated + * @allocated: number of entries in @elem already allocated + * + * Allocate elements for each slot in @elem that is non-%NULL. This is done by + * first calling into the alloc_fn supplied at pool initialization time, and + * dipping into the reserved pool when alloc_fn fails to allocate an element. + * + * On return all @count elements in @elems will be populated. + * + * Return: Always 0. If it wasn't for %$#^$ alloc tags, it would return void. + */ +int mempool_alloc_bulk_noprof(struct mempool *pool, void **elems, + unsigned int count, unsigned int allocated) +{ + gfp_t gfp_mask = GFP_KERNEL; + gfp_t gfp_temp = mempool_adjust_gfp(&gfp_mask); + unsigned int i = 0; + + VM_WARN_ON_ONCE(count > pool->min_nr); + might_alloc(gfp_mask); + + /* + * If an error is injected, fail all elements in a bulk allocation so + * that we stress the multiple elements missing path. 
+ */ + if (should_fail_ex(&fail_mempool_alloc_bulk, 1, FAULT_NOWARN)) { + pr_info("forcing mempool usage for %pS\n", + (void *)_RET_IP_); + goto use_pool; + } + +repeat_alloc: + /* + * Try to allocate the elements using the allocation callback first as + * that might succeed even when the caller's bulk allocation did not. + */ + for (i = 0; i < count; i++) { + if (elems[i]) + continue; + elems[i] = pool->alloc(gfp_temp, pool->pool_data); + if (unlikely(!elems[i])) + goto use_pool; + allocated++; + } + + return 0; + +use_pool: + allocated = mempool_alloc_from_pool(pool, elems, count, allocated, + gfp_temp); + gfp_temp = gfp_mask; + goto repeat_alloc; +} +EXPORT_SYMBOL_GPL(mempool_alloc_bulk_noprof); + /** * mempool_alloc - allocate an element from a memory pool * @pool: pointer to the memory pool @@ -478,8 +555,7 @@ repeat_alloc: * sleep in mempool_alloc_from_pool. Retry the allocation * with all flags set in that case. */ - element = mempool_alloc_from_pool(pool, gfp_temp); - if (!element) { + if (!mempool_alloc_from_pool(pool, &element, 1, 0, gfp_temp)) { if (gfp_temp != gfp_mask) { gfp_temp = gfp_mask; goto repeat_alloc; @@ -508,26 +584,33 @@ EXPORT_SYMBOL(mempool_alloc_noprof); */ void *mempool_alloc_preallocated(mempool_t *pool) { - return mempool_alloc_from_pool(pool, GFP_NOWAIT); + void *element = NULL; + + mempool_alloc_from_pool(pool, &element, 1, 0, GFP_NOWAIT); + return element; } EXPORT_SYMBOL(mempool_alloc_preallocated); /** - * mempool_free - return an element to a mempool - * @element: pointer to element + * mempool_free_bulk - return elements to a mempool * @pool: pointer to the memory pool + * @elems: elements to return + * @count: number of elements to return * - * Returns @element to @pool if it needs replenishing, else frees it using - * the free_fn callback in @pool. + * Returns a number of elements from the start of @elem to @pool if @pool needs + * replenishing and sets their slots in @elem to NULL. Other elements are left + * in @elem. * - * This function only sleeps if the free_fn callback sleeps. + * Return: number of elements transferred to @pool. Elements are always + * transferred from the beginning of @elem, so the return value can be used as + * an offset into @elem for the freeing the remaining elements in the caller. */ -void mempool_free(void *element, mempool_t *pool) +unsigned int mempool_free_bulk(struct mempool *pool, void **elems, + unsigned int count) { unsigned long flags; - - if (unlikely(element == NULL)) - return; + unsigned int freed = 0; + bool added = false; /* * Paired with the wmb in mempool_alloc(). The preceding read is @@ -561,21 +644,6 @@ void mempool_free(void *element, mempool_t *pool) * Waiters happen iff curr_nr is 0 and the above guarantee also * ensures that there will be frees which return elements to the * pool waking up the waiters. - */ - if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) { - spin_lock_irqsave(&pool->lock, flags); - if (likely(pool->curr_nr < pool->min_nr)) { - add_element(pool, element); - spin_unlock_irqrestore(&pool->lock, flags); - if (wq_has_sleeper(&pool->wait)) - wake_up(&pool->wait); - return; - } - spin_unlock_irqrestore(&pool->lock, flags); - } - - /* - * Handle the min_nr = 0 edge case: * * For zero-minimum pools, curr_nr < min_nr (0 < 0) never succeeds, * so waiters sleeping on pool->wait would never be woken by the @@ -583,20 +651,45 @@ void mempool_free(void *element, mempool_t *pool) * allocation of element when both min_nr and curr_nr are 0, and * any active waiters are properly awakened. 
*/ - if (unlikely(pool->min_nr == 0 && + if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) { + spin_lock_irqsave(&pool->lock, flags); + while (pool->curr_nr < pool->min_nr && freed < count) { + add_element(pool, elems[freed++]); + added = true; + } + spin_unlock_irqrestore(&pool->lock, flags); + } else if (unlikely(pool->min_nr == 0 && READ_ONCE(pool->curr_nr) == 0)) { + /* Handle the min_nr = 0 edge case: */ spin_lock_irqsave(&pool->lock, flags); if (likely(pool->curr_nr == 0)) { - add_element(pool, element); - spin_unlock_irqrestore(&pool->lock, flags); - if (wq_has_sleeper(&pool->wait)) - wake_up(&pool->wait); - return; + add_element(pool, elems[freed++]); + added = true; } spin_unlock_irqrestore(&pool->lock, flags); } - pool->free(element, pool->pool_data); + if (unlikely(added) && wq_has_sleeper(&pool->wait)) + wake_up(&pool->wait); + + return freed; +} +EXPORT_SYMBOL_GPL(mempool_free_bulk); + +/** + * mempool_free - return an element to the pool. + * @element: element to return + * @pool: pointer to the memory pool + * + * Returns @element to @pool if it needs replenishing, else frees it using + * the free_fn callback in @pool. + * + * This function only sleeps if the free_fn callback sleeps. + */ +void mempool_free(void *element, struct mempool *pool) +{ + if (likely(element) && !mempool_free_bulk(pool, &element, 1)) + pool->free(element, pool->pool_data); } EXPORT_SYMBOL(mempool_free); -- cgit v1.2.3 From 8b41fb80a2cc023591f47d63b094e96af9c2c615 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 13 Nov 2025 09:39:50 +0100 Subject: mempool: remove mempool_{init,create}_kvmalloc_pool This was added for bcachefs and is unused now. Signed-off-by: Christoph Hellwig Link: https://patch.msgid.link/20251113084022.1255121-10-hch@lst.de Signed-off-by: Vlastimil Babka --- include/linux/mempool.h | 13 ------------- mm/mempool.c | 13 ------------- 2 files changed, 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mempool.h b/include/linux/mempool.h index e914fec0e119..d9332485e8ca 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -103,19 +103,6 @@ void mempool_kfree(void *element, void *pool_data); mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \ (void *)(unsigned long)(_size)) -void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data); -void mempool_kvfree(void *element, void *pool_data); - -static inline int mempool_init_kvmalloc_pool(mempool_t *pool, int min_nr, size_t size) -{ - return mempool_init(pool, min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size); -} - -static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size) -{ - return mempool_create(min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size); -} - /* * A mempool_alloc_t and mempool_free_t for a simple page allocator that * allocates pages of the order specified by pool_data diff --git a/mm/mempool.c b/mm/mempool.c index 9ec3a04a0130..0e1e015998e7 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -728,19 +728,6 @@ void mempool_kfree(void *element, void *pool_data) } EXPORT_SYMBOL(mempool_kfree); -void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data) -{ - size_t size = (size_t)pool_data; - return kvmalloc(size, gfp_mask); -} -EXPORT_SYMBOL(mempool_kvmalloc); - -void mempool_kvfree(void *element, void *pool_data) -{ - kvfree(element); -} -EXPORT_SYMBOL(mempool_kvfree); - /* * A simple mempool-backed page allocator that allocates pages * of the order specified by pool_data. 
-- cgit v1.2.3 From 0cab6873b7305abdd0acd95ee8cfa56b983500da Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 13 Nov 2025 09:39:51 +0100 Subject: mempool: de-typedef Switch all uses of the deprecated mempool_t typedef in the core mempool code to use struct mempool instead. Signed-off-by: Christoph Hellwig Link: https://patch.msgid.link/20251113084022.1255121-11-hch@lst.de Signed-off-by: Vlastimil Babka --- include/linux/mempool.h | 39 +++++++++++++++++++------------------- mm/mempool.c | 50 +++++++++++++++++++++++++------------------------ 2 files changed, 45 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mempool.h b/include/linux/mempool.h index d9332485e8ca..e8e440e04a06 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -27,32 +27,31 @@ typedef struct mempool { wait_queue_head_t wait; } mempool_t; -static inline bool mempool_initialized(mempool_t *pool) +static inline bool mempool_initialized(struct mempool *pool) { return pool->elements != NULL; } -static inline bool mempool_is_saturated(mempool_t *pool) +static inline bool mempool_is_saturated(struct mempool *pool) { return READ_ONCE(pool->curr_nr) >= pool->min_nr; } -void mempool_exit(mempool_t *pool); -int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, - gfp_t gfp_mask, int node_id); - -int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data); +void mempool_exit(struct mempool *pool); +int mempool_init_node(struct mempool *pool, int min_nr, + mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, + void *pool_data, gfp_t gfp_mask, int node_id); +int mempool_init_noprof(struct mempool *pool, int min_nr, + mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, + void *pool_data); #define mempool_init(...) \ alloc_hooks(mempool_init_noprof(__VA_ARGS__)) -extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data); - -extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, - gfp_t gfp_mask, int nid); +struct mempool *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +struct mempool *mempool_create_node_noprof(int min_nr, + mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, + void *pool_data, gfp_t gfp_mask, int nid); #define mempool_create_node(...) \ alloc_hooks(mempool_create_node_noprof(__VA_ARGS__)) @@ -60,10 +59,10 @@ extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_ mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \ GFP_KERNEL, NUMA_NO_NODE) -extern int mempool_resize(mempool_t *pool, int new_min_nr); -extern void mempool_destroy(mempool_t *pool); +int mempool_resize(struct mempool *pool, int new_min_nr); +void mempool_destroy(struct mempool *pool); -extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc; +void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc; #define mempool_alloc(...) \ alloc_hooks(mempool_alloc_noprof(__VA_ARGS__)) int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem, @@ -71,8 +70,8 @@ int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem, #define mempool_alloc_bulk(...) 
\ alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__)) -extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc; -extern void mempool_free(void *element, mempool_t *pool); +void *mempool_alloc_preallocated(struct mempool *pool) __malloc; +void mempool_free(void *element, struct mempool *pool); unsigned int mempool_free_bulk(struct mempool *pool, void **elem, unsigned int count); diff --git a/mm/mempool.c b/mm/mempool.c index 0e1e015998e7..89ab7bba5c9c 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -40,7 +40,7 @@ static int __init mempool_faul_inject_init(void) late_initcall(mempool_faul_inject_init); #ifdef CONFIG_SLUB_DEBUG_ON -static void poison_error(mempool_t *pool, void *element, size_t size, +static void poison_error(struct mempool *pool, void *element, size_t size, size_t byte) { const int nr = pool->curr_nr; @@ -57,7 +57,7 @@ static void poison_error(mempool_t *pool, void *element, size_t size, dump_stack(); } -static void __check_element(mempool_t *pool, void *element, size_t size) +static void __check_element(struct mempool *pool, void *element, size_t size) { u8 *obj = element; size_t i; @@ -73,7 +73,7 @@ static void __check_element(mempool_t *pool, void *element, size_t size) memset(obj, POISON_INUSE, size); } -static void check_element(mempool_t *pool, void *element) +static void check_element(struct mempool *pool, void *element) { /* Skip checking: KASAN might save its metadata in the element. */ if (kasan_enabled()) @@ -102,7 +102,7 @@ static void __poison_element(void *element, size_t size) obj[size - 1] = POISON_END; } -static void poison_element(mempool_t *pool, void *element) +static void poison_element(struct mempool *pool, void *element) { /* Skip poisoning: KASAN might save its metadata in the element. */ if (kasan_enabled()) @@ -123,15 +123,16 @@ static void poison_element(mempool_t *pool, void *element) } } #else /* CONFIG_SLUB_DEBUG_ON */ -static inline void check_element(mempool_t *pool, void *element) +static inline void check_element(struct mempool *pool, void *element) { } -static inline void poison_element(mempool_t *pool, void *element) +static inline void poison_element(struct mempool *pool, void *element) { } #endif /* CONFIG_SLUB_DEBUG_ON */ -static __always_inline bool kasan_poison_element(mempool_t *pool, void *element) +static __always_inline bool kasan_poison_element(struct mempool *pool, + void *element) { if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) return kasan_mempool_poison_object(element); @@ -141,7 +142,7 @@ static __always_inline bool kasan_poison_element(mempool_t *pool, void *element) return true; } -static void kasan_unpoison_element(mempool_t *pool, void *element) +static void kasan_unpoison_element(struct mempool *pool, void *element) { if (pool->alloc == mempool_kmalloc) kasan_mempool_unpoison_object(element, (size_t)pool->pool_data); @@ -153,7 +154,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element) (unsigned long)pool->pool_data); } -static __always_inline void add_element(mempool_t *pool, void *element) +static __always_inline void add_element(struct mempool *pool, void *element) { BUG_ON(pool->min_nr != 0 && pool->curr_nr >= pool->min_nr); poison_element(pool, element); @@ -161,7 +162,7 @@ static __always_inline void add_element(mempool_t *pool, void *element) pool->elements[pool->curr_nr++] = element; } -static void *remove_element(mempool_t *pool) +static void *remove_element(struct mempool *pool) { void *element = pool->elements[--pool->curr_nr]; @@ -182,7 +183,7 @@ static 
void *remove_element(mempool_t *pool) * May be called on a zeroed but uninitialized mempool (i.e. allocated with * kzalloc()). */ -void mempool_exit(mempool_t *pool) +void mempool_exit(struct mempool *pool) { while (pool->curr_nr) { void *element = remove_element(pool); @@ -201,7 +202,7 @@ EXPORT_SYMBOL(mempool_exit); * Free all reserved elements in @pool and @pool itself. This function * only sleeps if the free_fn() function sleeps. */ -void mempool_destroy(mempool_t *pool) +void mempool_destroy(struct mempool *pool) { if (unlikely(!pool)) return; @@ -211,9 +212,9 @@ void mempool_destroy(mempool_t *pool) } EXPORT_SYMBOL(mempool_destroy); -int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, - gfp_t gfp_mask, int node_id) +int mempool_init_node(struct mempool *pool, int min_nr, + mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, + void *pool_data, gfp_t gfp_mask, int node_id) { spin_lock_init(&pool->lock); pool->min_nr = min_nr; @@ -263,8 +264,9 @@ EXPORT_SYMBOL(mempool_init_node); * * Return: %0 on success, negative error code otherwise. */ -int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data) +int mempool_init_noprof(struct mempool *pool, int min_nr, + mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, + void *pool_data) { return mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data, GFP_KERNEL, NUMA_NO_NODE); @@ -290,11 +292,11 @@ EXPORT_SYMBOL(mempool_init_noprof); * * Return: pointer to the created memory pool object or %NULL on error. */ -mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, - gfp_t gfp_mask, int node_id) +struct mempool *mempool_create_node_noprof(int min_nr, + mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, + void *pool_data, gfp_t gfp_mask, int node_id) { - mempool_t *pool; + struct mempool *pool; pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); if (!pool) @@ -328,7 +330,7 @@ EXPORT_SYMBOL(mempool_create_node_noprof); * * Return: %0 on success, negative error code otherwise. */ -int mempool_resize(mempool_t *pool, int new_min_nr) +int mempool_resize(struct mempool *pool, int new_min_nr) { void *element; void **new_elements; @@ -530,7 +532,7 @@ EXPORT_SYMBOL_GPL(mempool_alloc_bulk_noprof); * an element. Allocation failure can only happen when @gfp_mask does not * include %__GFP_DIRECT_RECLAIM. */ -void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) +void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) { gfp_t gfp_temp = mempool_adjust_gfp(&gfp_mask); void *element; @@ -582,7 +584,7 @@ EXPORT_SYMBOL(mempool_alloc_noprof); * Return: pointer to the allocated element or %NULL if no elements are * available. */ -void *mempool_alloc_preallocated(mempool_t *pool) +void *mempool_alloc_preallocated(struct mempool *pool) { void *element = NULL; -- cgit v1.2.3
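Usage sketch (illustrative, not part of the series above): the fragment below shows one way a caller could combine the new mempool_{alloc,free}_bulk interface with the page allocator's bulk helpers, as suggested by the mempool_{alloc,free}_bulk commit message. It assumes a page-backed pool created with mempool_create_page_pool() whose min_nr is at least NR_SEGS; the NR_SEGS constant and the demo_* helpers are made up for illustration, and the alloc_pages_bulk()/release_pages() calls reflect their current in-tree signatures, which may differ on other kernel versions.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mempool.h>
#include <linux/string.h>

#define NR_SEGS	8	/* hypothetical batch size; must not exceed the pool's min_nr */

/* Populate @pages with NR_SEGS pages, preferring the page allocator's bulk path. */
static int demo_get_pages(struct mempool *pool, void *pages[NR_SEGS])
{
	unsigned int allocated;

	/* Both bulk interfaces only fill NULL slots, so start from a clean array. */
	memset(pages, 0, NR_SEGS * sizeof(*pages));

	/* Opportunistically grab as many pages as possible in a single call. */
	allocated = alloc_pages_bulk(GFP_KERNEL, NR_SEGS, (struct page **)pages);

	/*
	 * mempool_alloc_bulk() fills the remaining NULL slots, first through the
	 * pool's alloc_fn and then from the reserved elements, sleeping as needed.
	 * It returns 0 once every slot is populated.
	 */
	return mempool_alloc_bulk(pool, pages, NR_SEGS, allocated);
}

/* Return the pages, topping up the pool's reserve before freeing the rest. */
static void demo_put_pages(struct mempool *pool, void *pages[NR_SEGS])
{
	unsigned int kept = mempool_free_bulk(pool, pages, NR_SEGS);

	/* mempool_free_bulk() consumes from the front; release the leftover tail. */
	release_pages((struct page **)(pages + kept), NR_SEGS - kept);
}

Because mempool_free_bulk() always takes elements from the start of the array and returns how many it kept, the leftover tail can be handed to release_pages() in one call, which is the batching the commit message describes for page-backed mempools.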