From d2310fab512bbcfddce8d28380d2562f1db46dfd Mon Sep 17 00:00:00 2001
From: Neil Brown
Date: Tue, 18 Jun 2002 04:16:14 -0700
Subject: [PATCH] md 8 of 22 - Discard md_make_request in favour of per-personality make_request functions.

As we now have per-device queues, we don't need a common make_request
function that dispatches; we can dispatch directly.

Each *_make_request function is changed to take a request_queue_t from
which it extracts the mddev that it needs, and to deduce the "rw" flag
directly from the bio.
---
 drivers/md/linear.c    | 13 +++++++------
 drivers/md/md.c        | 14 +-------------
 drivers/md/multipath.c | 16 ++++------------
 drivers/md/raid0.c     |  3 ++-
 drivers/md/raid1.c     | 15 ++++++---------
 drivers/md/raid5.c     | 14 ++++----------
 6 files changed, 24 insertions(+), 51 deletions(-)

(limited to 'drivers')

diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 90a5fe2047ed..b5889b31d491 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -1,6 +1,6 @@
 /*
    linear.c : Multiple Devices driver for Linux
-          Copyright (C) 1994-96 Marc ZYNGIER
+          Copyright (C) 1994-96 Marc ZYNGIER
           <zyngier@ufr-info-p7.ibp.fr> or
@@ -130,12 +130,13 @@ static int linear_stop (mddev_t *mddev)
         return 0;
 }
 
-static int linear_make_request (mddev_t *mddev, int rw, struct bio *bio)
+static int linear_make_request (request_queue_t *q, struct bio *bio)
 {
-        linear_conf_t *conf = mddev_to_conf(mddev);
-        struct linear_hash *hash;
-        dev_info_t *tmp_dev;
-        long block;
+        mddev_t *mddev = q->queuedata;
+        linear_conf_t *conf = mddev_to_conf(mddev);
+        struct linear_hash *hash;
+        dev_info_t *tmp_dev;
+        long block;
 
         block = bio->bi_sector >> 1;
         hash = conf->hash_table + (block / conf->smallest->size);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e5ce11d4b821..2dd385b4dc6b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -169,18 +169,6 @@ void del_mddev_mapping(mddev_t * mddev, kdev_t dev)
         mddev_map[minor].data = NULL;
 }
 
-static int md_make_request (request_queue_t *q, struct bio *bio)
-{
-        mddev_t *mddev = q->queuedata;
-
-        if (mddev && mddev->pers)
-                return mddev->pers->make_request(mddev, bio_rw(bio), bio);
-        else {
-                bio_io_error(bio);
-                return 0;
-        }
-}
-
 static int md_fail_request (request_queue_t *q, struct bio *bio)
 {
         bio_io_error(bio);
@@ -1705,7 +1693,7 @@ static int do_md_run(mddev_t * mddev)
         }
 
         mddev->pers = pers[pnum];
-        blk_queue_make_request(&mddev->queue, md_make_request);
+        blk_queue_make_request(&mddev->queue, mddev->pers->make_request);
         mddev->queue.queuedata = mddev;
 
         err = mddev->pers->run(mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 46f089ee8481..77b8075fb8b3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -243,27 +243,19 @@ static int multipath_read_balance (multipath_conf_t *conf)
         return 0;
 }
 
-static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio)
+static int multipath_make_request (request_queue_t *q, struct bio * bio)
 {
+        mddev_t *mddev = q->queuedata;
         multipath_conf_t *conf = mddev_to_conf(mddev);
         struct bio *real_bio;
         struct multipath_bh * mp_bh;
         struct multipath_info *multipath;
 
-/*
- * make_request() can abort the operation when READA is being
- * used and no empty request is available.
- *
- * Currently, just replace the command with READ/WRITE.
- */
-        if (rw == READA)
-                rw = READ;
-
         mp_bh = multipath_alloc_mpbh (conf);
 
         mp_bh->master_bio = bio;
         mp_bh->mddev = mddev;
-        mp_bh->cmd = rw;
+        mp_bh->cmd = bio_data_dir(bio);
 
         /*
          * read balancing logic:
@@ -272,7 +264,7 @@ static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio)
 
         real_bio = bio_clone(bio, GFP_NOIO);
         real_bio->bi_bdev = multipath->bdev;
-        real_bio->bi_rw = rw;
+        real_bio->bi_rw = bio_data_dir(bio);
         real_bio->bi_end_io = multipath_end_request;
         real_bio->bi_private = mp_bh;
         mp_bh->bio = real_bio;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 641b3fef9e10..d172e9681cb5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -224,8 +224,9 @@ static int raid0_stop (mddev_t *mddev)
  * Of course, those facts may not be valid anymore (and surely won't...)
  * Hey guys, there's some work out there ;-)
  */
-static int raid0_make_request (mddev_t *mddev, int rw, struct bio *bio)
+static int raid0_make_request (request_queue_t *q, struct bio *bio)
 {
+        mddev_t *mddev = q->queuedata;
         unsigned int sect_in_chunk, chunksize_bits, chunk_size;
         raid0_conf_t *conf = mddev_to_conf(mddev);
         struct raid0_hash *hash;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 43fdb75de0fe..cad86c954701 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -433,8 +433,9 @@ static void resume_device(conf_t *conf)
         spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(mddev_t *mddev, int rw, struct bio * bio)
+static int make_request(request_queue_t *q, struct bio * bio)
 {
+        mddev_t *mddev = q->queuedata;
         conf_t *conf = mddev_to_conf(mddev);
         mirror_info_t *mirror;
         r1bio_t *r1_bio;
@@ -455,20 +456,16 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
          * make_request() can abort the operation when READA is being
          * used and no empty request is available.
          *
-         * Currently, just replace the command with READ.
          */
-        if (rw == READA)
-                rw = READ;
-
         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
         r1_bio->master_bio = bio;
         r1_bio->mddev = mddev;
 
         r1_bio->sector = bio->bi_sector;
-        r1_bio->cmd = rw;
+        r1_bio->cmd = bio_data_dir(bio);
 
-        if (rw == READ) {
+        if (r1_bio->cmd == READ) {
                 /*
                  * read balancing logic:
                  */
@@ -482,7 +479,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
         read_bio->bi_sector = r1_bio->sector;
         read_bio->bi_bdev = mirror->bdev;
         read_bio->bi_end_io = end_request;
-        read_bio->bi_rw = rw;
+        read_bio->bi_rw = r1_bio->cmd;
         read_bio->bi_private = r1_bio;
 
         generic_make_request(read_bio);
@@ -506,7 +503,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
                 mbio->bi_sector = r1_bio->sector;
                 mbio->bi_bdev = conf->mirrors[i].bdev;
                 mbio->bi_end_io = end_request;
-                mbio->bi_rw = rw;
+                mbio->bi_rw = r1_bio->cmd;
                 mbio->bi_private = r1_bio;
 
                 sum_bios++;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8a68abc2e6c9..4859866845ee 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1248,23 +1248,17 @@ static inline void raid5_plug_device(raid5_conf_t *conf)
         spin_unlock_irq(&conf->device_lock);
 }
 
-static int make_request (mddev_t *mddev, int rw, struct bio * bi)
+static int make_request (request_queue_t *q, struct bio * bi)
 {
+        mddev_t *mddev = q->queuedata;
         raid5_conf_t *conf = mddev_to_conf(mddev);
         const unsigned int raid_disks = conf->raid_disks;
         const unsigned int data_disks = raid_disks - 1;
         unsigned int dd_idx, pd_idx;
         sector_t new_sector;
         sector_t logical_sector, last_sector;
-        int read_ahead = 0;
-
         struct stripe_head *sh;
 
-        if (rw == READA) {
-                rw = READ;
-                read_ahead=1;
-        }
-
         logical_sector = bi->bi_sector & ~(STRIPE_SECTORS-1);
         last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -1279,10 +1273,10 @@ static int make_request (mddev_t *mddev, int rw, struct bio * bi)
         PRINTK("raid5: make_request, sector %ul logical %ul\n",
                 new_sector, logical_sector);
 
-        sh = get_active_stripe(conf, new_sector, pd_idx, read_ahead);
+        sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
         if (sh) {
-                add_stripe_bio(sh, bi, dd_idx, rw);
+                add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK));
 
                 raid5_plug_device(conf);
                 handle_stripe(sh);
--
cgit v1.2.3
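
The shape every personality ends up with can be modelled outside the kernel with
a few stand-in types.  The sketch below is illustrative only: struct request_queue,
struct bio, struct mddev, the bio_data_dir() macro and example_make_request() are
simplified stand-ins assumed for the example, not the 2.5-era kernel definitions.
The wiring in main() mirrors what blk_queue_make_request(&mddev->queue,
mddev->pers->make_request) plus mddev->queue.queuedata = mddev achieve in the
md.c hunk above.

/*
 * Stand-alone model of the dispatch pattern this patch adopts.  The
 * types below are simplified stand-ins, not real kernel definitions.
 */
#include <stdio.h>

#define READ  0
#define WRITE 1

struct bio {
        unsigned long bi_rw;      /* bit 0 carries the data direction */
        unsigned long bi_sector;
};

/* direction helper modelled on the kernel's bio_data_dir() */
#define bio_data_dir(bio) ((bio)->bi_rw & 1)

struct mddev {
        const char *name;
};

struct request_queue {
        /* per-queue hook, as installed by blk_queue_make_request() */
        int (*make_request_fn)(struct request_queue *q, struct bio *bio);
        void *queuedata;          /* driver-private pointer: the mddev */
};

/*
 * One "personality" make_request.  It no longer receives mddev and rw as
 * arguments; it recovers the mddev from q->queuedata and derives the
 * direction from the bio, which is the shape linear.c, raid0.c, raid1.c,
 * raid5.c and multipath.c all take in the diff above.
 */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
        struct mddev *mddev = q->queuedata;
        int rw = bio_data_dir(bio);

        printf("%s: %s sector %lu\n", mddev->name,
               rw == WRITE ? "WRITE" : "READ", bio->bi_sector);
        return 0;
}

int main(void)
{
        struct mddev md0 = { .name = "md0" };
        struct request_queue q = {
                /* the personality hook sits on the queue directly, with
                 * no central md_make_request() dispatcher in between */
                .make_request_fn = example_make_request,
                .queuedata = &md0,
        };
        struct bio wbio = { .bi_rw = WRITE, .bi_sector = 2048 };

        /* generic_make_request() would end up invoking this hook */
        return q.make_request_fn(&q, &wbio);
}

The point of the change is visible in example_make_request(): once the queue
carries the driver-private pointer and the bio carries the direction, the
intermediate dispatcher has nothing left to add, which is why md_make_request()
can be deleted outright.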