Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 24 ++++--------------------
 1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index b06d6ccc1966..140aad55b292 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -31,7 +31,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/writeback.h>
-#include <linux/mempool.h>
 #include <linux/hash.h>
 #include <linux/suspend.h>
 #include <linux/buffer_head.h>
@@ -2791,7 +2790,6 @@ asmlinkage long sys_bdflush(int func, long data)
  * Buffer-head allocation
  */
 static kmem_cache_t *bh_cachep;
-static mempool_t *bh_mempool;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -2825,7 +2823,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(void)
 {
-	struct buffer_head *ret = mempool_alloc(bh_mempool, GFP_NOFS);
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, GFP_NOFS);
 	if (ret) {
 		preempt_disable();
 		__get_cpu_var(bh_accounting).nr++;
@@ -2839,7 +2837,7 @@ EXPORT_SYMBOL(alloc_buffer_head);
 void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
-	mempool_free(bh, bh_mempool);
+	kmem_cache_free(bh_cachep, bh);
 	preempt_disable();
 	__get_cpu_var(bh_accounting).nr--;
 	recalc_bh_state();
@@ -2847,7 +2845,8 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void
+init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
 {
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 			SLAB_CTOR_CONSTRUCTOR) {
@@ -2858,19 +2857,6 @@ static void init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long fla
 	}
 }
 
-static void *bh_mempool_alloc(int gfp_mask, void *pool_data)
-{
-	return kmem_cache_alloc(bh_cachep, gfp_mask);
-}
-
-static void bh_mempool_free(void *element, void *pool_data)
-{
-	return kmem_cache_free(bh_cachep, element);
-}
-
-#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
-#define MAX_UNUSED_BUFFERS NR_RESERVED+20
-
 static void buffer_init_cpu(int cpu)
 {
 	struct bh_accounting *bha = &per_cpu(bh_accounting, cpu);
@@ -2907,8 +2893,6 @@ void __init buffer_init(void)
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
 			0, init_buffer_head, NULL);
-	bh_mempool = mempool_create(MAX_UNUSED_BUFFERS, bh_mempool_alloc,
-				bh_mempool_free, NULL);
 	for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
 		init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
 
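The change amounts to removing the mempool layer and serving buffer_head allocations straight from the dedicated slab cache: alloc_buffer_head() and free_buffer_head() now call kmem_cache_alloc() and kmem_cache_free() directly, and the NR_RESERVED/MAX_UNUSED_BUFFERS reserve that the mempool maintained goes away. For reference only (not part of the patch), below is a minimal toy-module sketch of the same pattern using the current slab API (struct kmem_cache and a one-argument constructor, rather than the kmem_cache_t interface of this era); the demo_obj, demo_cachep, and demo_obj_ctor names are invented for illustration.

/*
 * Sketch of the "dedicated slab cache, no mempool wrapper" pattern.
 * Not from fs/buffer.c; modern API, invented names.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>

struct demo_obj {
	struct list_head list;
	unsigned long flags;
};

static struct kmem_cache *demo_cachep;

/* Constructor runs once per object when a slab page is populated. */
static void demo_obj_ctor(void *data)
{
	struct demo_obj *obj = data;

	memset(obj, 0, sizeof(*obj));
	INIT_LIST_HEAD(&obj->list);
}

static int __init demo_init(void)
{
	struct demo_obj *obj;

	demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
					0, 0, demo_obj_ctor);
	if (!demo_cachep)
		return -ENOMEM;

	/* Allocation goes straight to the slab cache -- no mempool layer. */
	obj = kmem_cache_alloc(demo_cachep, GFP_KERNEL);
	if (obj) {
		pr_info("demo: allocated one object\n");
		kmem_cache_free(demo_cachep, obj);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");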