summary refs log tree commit diff
diff options
context:
space:
mode:
author	Andrew Morton <akpm@digeo.com>	2003-02-03 16:59:43 -0800
committer	Linus Torvalds <torvalds@home.transmeta.com>	2003-02-03 16:59:43 -0800
commit	3ac8c84597560f3c8a950d36d0e7a95cbe43f9d1 (patch)
tree	4276932c9cf25e403afe17443b0dc9f330015260
parent	99c88bc2fa292422ff006654f6014bf34b6dd83b (diff)
[PATCH] remove __GFP_HIGHIO
Patch From: Hugh Dickins <hugh@veritas.com>

Recently noticed that __GFP_HIGHIO has played no real part since bounce
buffering was converted to mempool in 2.5.12: so this patch (over
2.5.58-mm1) removes it and GFP_NOHIGHIO and SLAB_NOHIGHIO.

Also removes GFP_KSWAPD, in 2.5 same as GFP_KERNEL; leaves GFP_USER,
which can be a useful comment, even though in 2.5 same as GFP_KERNEL.

One anomaly needs comment: strictly, if there's no __GFP_HIGHIO, then
GFP_NOHIGHIO translates to GFP_NOFS; but GFP_NOFS looks wrong in the
block layer, and if you follow them down, you find that GFP_NOFS and
GFP_NOIO behave the same way in mempool_alloc - so I've used the less
surprising GFP_NOIO to replace GFP_NOHIGHIO.
-rw-r--r--	drivers/block/ll_rw_blk.c	2
-rw-r--r--	include/linux/blkdev.h	2
-rw-r--r--	include/linux/gfp.h	23
-rw-r--r--	include/linux/slab.h	3
-rw-r--r--	mm/highmem.c	13
-rw-r--r--	mm/vmscan.c	4
6 files changed, 19 insertions(+), 28 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 36436a7aa57f..f8a4e7a81f4b 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -274,7 +274,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA;
} else
- q->bounce_gfp = GFP_NOHIGHIO;
+ q->bounce_gfp = GFP_NOIO;
/*
* keep this for debugging for now...
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 90171e65e989..c599ea36233b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -292,7 +292,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
extern int init_emergency_isa_pool(void);
-inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#define rq_for_each_bio(bio, rq) \
if ((rq->bio)) \
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 54bf03eaf3e7..c9fb5039e753 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -14,20 +14,17 @@
/* Action modifiers - doesn't change the zoning */
#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
#define __GFP_HIGH 0x20 /* Should access emergency pools? */
-#define __GFP_IO 0x40 /* Can start low memory physical IO? */
-#define __GFP_HIGHIO 0x80 /* Can start high mem physical IO? */
-#define __GFP_FS 0x100 /* Can call down to low-level FS? */
-#define __GFP_COLD 0x200 /* Cache-cold page required */
-#define __GFP_NOWARN 0x400 /* Suppress page allocation failure warning */
-
-#define GFP_NOHIGHIO ( __GFP_WAIT | __GFP_IO)
-#define GFP_NOIO ( __GFP_WAIT)
-#define GFP_NOFS ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
+#define __GFP_IO 0x40 /* Can start physical IO? */
+#define __GFP_FS 0x80 /* Can call down to low-level FS? */
+#define __GFP_COLD 0x100 /* Cache-cold page required */
+#define __GFP_NOWARN 0x200 /* Suppress page allocation failure warning */
+
#define GFP_ATOMIC (__GFP_HIGH)
-#define GFP_USER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
-#define GFP_KERNEL ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_KSWAPD ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_NOIO (__GFP_WAIT)
+#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 997bc710bfb0..220a672af798 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -17,13 +17,12 @@ typedef struct kmem_cache_s kmem_cache_t;
/* flags for kmem_cache_alloc() */
#define SLAB_NOFS GFP_NOFS
#define SLAB_NOIO GFP_NOIO
-#define SLAB_NOHIGHIO GFP_NOHIGHIO
#define SLAB_ATOMIC GFP_ATOMIC
#define SLAB_USER GFP_USER
#define SLAB_KERNEL GFP_KERNEL
#define SLAB_DMA GFP_DMA
-#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS|__GFP_COLD|__GFP_NOWARN)
+#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|__GFP_COLD|__GFP_NOWARN)
#define SLAB_NO_GROW 0x00001000UL /* don't grow a cache */
/* flags to pass to kmem_cache_create().
diff --git a/mm/highmem.c b/mm/highmem.c
index ee5e622dcbfe..33adcf242697 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -366,7 +366,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
return 0;
}
-void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
+static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
mempool_t *pool)
{
struct page *page;
@@ -387,7 +387,7 @@ void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
* irk, bounce it
*/
if (!bio)
- bio = bio_alloc(bio_gfp, (*bio_orig)->bi_vcnt);
+ bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
to = bio->bi_io_vec + i;
@@ -447,10 +447,9 @@ void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
*bio_orig = bio;
}
-inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
{
mempool_t *pool;
- int bio_gfp;
/*
* for non-isa bounce case, just check if the bounce pfn is equal
@@ -460,20 +459,16 @@ inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
if (!(q->bounce_gfp & GFP_DMA)) {
if (q->bounce_pfn >= blk_max_pfn)
return;
-
- bio_gfp = GFP_NOHIGHIO;
pool = page_pool;
} else {
BUG_ON(!isa_page_pool);
-
- bio_gfp = GFP_NOIO;
pool = isa_page_pool;
}
/*
* slow path
*/
- __blk_queue_bounce(q, bio_orig, bio_gfp, pool);
+ __blk_queue_bounce(q, bio_orig, pool);
}
#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a8b9d2c450c4..60c534dc4129 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -894,9 +894,9 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
max_scan = to_reclaim * 2;
if (max_scan < SWAP_CLUSTER_MAX)
max_scan = SWAP_CLUSTER_MAX;
- to_free -= shrink_zone(zone, max_scan, GFP_KSWAPD,
+ to_free -= shrink_zone(zone, max_scan, GFP_KERNEL,
to_reclaim, &nr_mapped, ps, priority);
- shrink_slab(max_scan + nr_mapped, GFP_KSWAPD);
+ shrink_slab(max_scan + nr_mapped, GFP_KERNEL);
if (zone->all_unreclaimable)
continue;
if (zone->pages_scanned > zone->present_pages * 2)