| author | Jens Axboe <axboe@suse.de> | 2002-10-27 16:49:35 -0800 |
|---|---|---|
| committer | Jens Axboe <axboe@suse.de> | 2002-10-27 16:49:35 -0800 |
| commit | d50059ecf078224b00b0b341173fd35dbbd0eea3 | (patch) |
| tree | 345dadaeb939be146b74853c2135ff4e09f92489 | |
| parent | 1b59d3cb089a2d3bb25ce1a2354bd1a0f794e8c5 | (diff) |
[PATCH] end_io bouncing
o Split blk_queue_bounce() into a slow and a fast path. The fast path is
  inlined; only if we actually need to check the bio for possible bounces
  (and bounce it) do we enter the __blk_queue_bounce() slow path (see the
  sketch after this list).
o Fix a nasty bug that could cause corruption for file systems not using a
  PAGE_CACHE_SIZE block size! We were not setting the 'to' bv_offset
  correctly.
o Add a BIO_BOUNCED flag. Later patches will use this for debug checking.
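The control flow the patch sets up is simple: a tiny inlined wrapper performs the cheap "can anything here need bouncing at all?" check, and only the rare case drops into the out-of-line worker. The following userspace sketch mirrors that pattern with simplified stand-ins for the kernel types (request_queue_t, struct bio, mempool_t are replaced by cut-down structs, and bv_pfn is an invented field standing in for the page's pfn); it illustrates the split and the bv_offset fix, it is not the kernel code itself.

```c
/*
 * Userspace illustration of the fast/slow path split above.  All types are
 * simplified stand-ins for the kernel ones; bv_pfn is an invented field
 * standing in for "which pfn does this segment's page live at".
 */
#include <stdio.h>

struct bio_vec {
	unsigned long	bv_pfn;		/* stand-in for page_to_pfn(bv_page) */
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct bio {
	struct bio_vec	*bi_io_vec;
	int		bi_vcnt;
};

struct request_queue {
	unsigned long	bounce_pfn;	/* pages at or above this must be bounced */
};

static unsigned long blk_max_pfn = 1024;	/* highest pfn in the "system" */

/*
 * Slow path: walk every segment and bounce the ones that sit too high.
 * Note that the bounce copy takes bv_offset from 'from' -- the corruption
 * fix in the patch was exactly this assignment.
 */
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	struct bio *bio = *bio_orig;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct bio_vec *from = &bio->bi_io_vec[i];
		struct bio_vec to;

		if (from->bv_pfn < q->bounce_pfn)
			continue;		/* already reachable, leave it alone */

		to.bv_pfn = 0;			/* pretend this is a low bounce page */
		to.bv_len = from->bv_len;
		to.bv_offset = from->bv_offset;	/* from 'from', not 'to' */

		printf("segment %d bounced: len=%u offset=%u\n",
		       i, to.bv_len, to.bv_offset);
	}
}

/*
 * Fast path: if the queue can reach every page in the system, there is
 * nothing to do and we never touch the bio's segments.
 */
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	if (q->bounce_pfn >= blk_max_pfn)
		return;

	__blk_queue_bounce(q, bio_orig);	/* rare case: do the real work */
}

int main(void)
{
	struct bio_vec vec = { .bv_pfn = 900, .bv_len = 512, .bv_offset = 64 };
	struct bio bio = { .bi_io_vec = &vec, .bi_vcnt = 1 };
	struct bio *bp = &bio;

	struct request_queue himem_q = { .bounce_pfn = 4096 };	/* fast path, returns */
	struct request_queue lowmem_q = { .bounce_pfn = 256 };	/* must bounce */

	blk_queue_bounce(&himem_q, &bp);
	blk_queue_bounce(&lowmem_q, &bp);
	return 0;
}
```

Only the second call prints anything, which is the point of the split: the common case costs a single comparison in the caller.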
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/bio.h | 1 |
| -rw-r--r-- | include/linux/blkdev.h | 2 |
| -rw-r--r-- | mm/highmem.c | 79 |

3 files changed, 46 insertions, 36 deletions
```diff
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 93ac8216aae4..d5decec17fe7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -101,6 +101,7 @@ struct bio {
 #define BIO_EOF		2	/* out-out-bounds error */
 #define BIO_SEG_VALID	3	/* nr_hw_seg valid */
 #define BIO_CLONED	4	/* doesn't own data */
+#define BIO_BOUNCED	5	/* bio is a bounce bio */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 42e81a4a0cab..6437522be832 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -268,7 +268,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_ISA	(ISA_DMA_THRESHOLD)
 
 extern int init_emergency_isa_pool(void);
-void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);
 
 #define rq_for_each_bio(bio, rq)	\
 	if ((rq->bio))			\
diff --git a/mm/highmem.c b/mm/highmem.c
index 68bacfed4618..b80f2da52fe7 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -366,34 +366,13 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }
 
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
+			mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig), bio_gfp;
+	int i, rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
-	mempool_t *pool;
-	unsigned long pfn = q->bounce_pfn;
-	int gfp = q->bounce_gfp;
-
-	BUG_ON((*bio_orig)->bi_idx);
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(gfp & GFP_DMA)) {
-		if (pfn >= blk_max_pfn)
-			return;
-
-		bio_gfp = GFP_NOHIGHIO;
-		pool = page_pool;
-	} else {
-		BUG_ON(!isa_page_pool);
-		bio_gfp = GFP_NOIO;
-		pool = isa_page_pool;
-	}
 
 	bio_for_each_segment(from, *bio_orig, i) {
 		page = from->bv_page;
@@ -401,7 +380,7 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < pfn)
+		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < q->bounce_pfn)
 			continue;
 
 		/*
@@ -412,11 +391,11 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 
 		to = bio->bi_io_vec + i;
 
-		to->bv_page = mempool_alloc(pool, gfp);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;
 
-		if (rw & WRITE) {
+		if (rw == WRITE) {
 			char *vto, *vfrom;
 
 			vto = page_address(to->bv_page) + to->bv_offset;
@@ -437,15 +416,16 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	 * pages
 	 */
 	bio_for_each_segment(from, *bio_orig, i) {
-		to = &bio->bi_io_vec[i];
+		to = bio_iovec_idx(bio, i);
 
 		if (!to->bv_page) {
 			to->bv_page = from->bv_page;
 			to->bv_len = from->bv_len;
-			to->bv_offset = to->bv_offset;
+			to->bv_offset = from->bv_offset;
 		}
 	}
 
 	bio->bi_bdev = (*bio_orig)->bi_bdev;
+	bio->bi_flags |= (1 << BIO_BOUNCED);
 	bio->bi_sector = (*bio_orig)->bi_sector;
 	bio->bi_rw = (*bio_orig)->bi_rw;
 
@@ -454,14 +434,12 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	bio->bi_size = (*bio_orig)->bi_size;
 
 	if (pool == page_pool) {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write;
-		else
+		bio->bi_end_io = bounce_end_io_write;
+		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read;
 	} else {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write_isa;
-		else
+		bio->bi_end_io = bounce_end_io_write_isa;
+		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read_isa;
 	}
 
@@ -469,6 +447,37 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	*bio_orig = bio;
 }
 
+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+{
+	mempool_t *pool;
+	int bio_gfp;
+
+	BUG_ON((*bio_orig)->bi_idx);
+
+	/*
+	 * for non-isa bounce case, just check if the bounce pfn is equal
+	 * to or bigger than the highest pfn in the system -- in that case,
+	 * don't waste time iterating over bio segments
+	 */
+	if (!(q->bounce_gfp & GFP_DMA)) {
+		if (q->bounce_pfn >= blk_max_pfn)
+			return;
+
+		bio_gfp = GFP_NOHIGHIO;
+		pool = page_pool;
+	} else {
+		BUG_ON(!isa_page_pool);
+
+		bio_gfp = GFP_NOIO;
+		pool = isa_page_pool;
+	}
+
+	/*
+	 * slow path
+	 */
+	__blk_queue_bounce(q, bio_orig, bio_gfp, pool);
+}
+
 #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)
 void check_highmem_ptes(void)
 {
```
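The BIO_BOUNCED flag is only defined and set by this commit; the debug checking it enables is left to later patches. A purely hypothetical use would be an assertion built on the existing bio_flagged() macro, roughly like the userspace sketch below (the struct, macro value, and check_not_bounced() helper are cut-down stand-ins invented for illustration, not part of this commit).

```c
/*
 * Hypothetical sketch of the kind of debug check later patches could build
 * on BIO_BOUNCED.  Everything here is a stand-in for the real definitions
 * in include/linux/bio.h.
 */
#include <assert.h>

#define BIO_BOUNCED	5	/* matches the value added in bio.h above */

struct bio {
	unsigned long	bi_flags;
};

#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

/* a path that must only ever see the original, un-bounced bio */
static void check_not_bounced(struct bio *bio)
{
	assert(!bio_flagged(bio, BIO_BOUNCED));
}

int main(void)
{
	struct bio bio = { .bi_flags = 0 };

	check_not_bounced(&bio);		/* passes */
	bio.bi_flags |= (1 << BIO_BOUNCED);	/* as __blk_queue_bounce() now does */
	/* check_not_bounced(&bio);		   would now trip the assert */
	return 0;
}
```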
