From 4e5314b56a7ea11c7a5f2b8418992b2f49648a25 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:22 -0800 Subject: [PATCH] md: better handling of readerrors with raid5. This patch changes the behaviour of raid5 when it gets a read error. Instead of just failing the device, it tries to find out what should have been there, and writes it over the bad block. For some media-errors, this has a reasonable chance of fixing the error. If the write succeeds, and a subsequent read succeeds as well, raid5 decides the address is OK and continues. Instead of failing a drive on read-error, we attempt to re-write the block, and then re-read. If that all works, we allow the device to remain in the array. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/raid/raid5.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux/raid') diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 176fc653c284..f025ba6fb14c 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -154,6 +154,8 @@ struct stripe_head { #define R5_Wantwrite 5 #define R5_Syncio 6 /* this io need to be accounted as resync io */ #define R5_Overlap 7 /* There is a pending overlapping request on this block */ +#define R5_ReadError 8 /* seen a read error here recently */ +#define R5_ReWrite 9 /* have tried to over-write the readerror */ /* * Write method -- cgit v1.2.3 From eae1701fbd264cfc7efbaf7cd4cd999760070e27 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:23 -0800 Subject: [PATCH] md: initial sysfs support for md Start using kobjects in mddevs, and provide a couple of simple attributes (level and disks). Attributes live in /sys/block/mdX/md/attr-name Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++- include/linux/raid/md_k.h | 2 ++ 2 files changed, 87 insertions(+), 1 deletion(-) (limited to 'include/linux/raid') diff --git a/drivers/md/md.c b/drivers/md/md.c index 9ecf51ee596f..a68ad8547325 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -181,7 +181,7 @@ static void mddev_put(mddev_t *mddev) if (!mddev->raid_disks && list_empty(&mddev->disks)) { list_del(&mddev->all_mddevs); blk_put_queue(mddev->queue); - kfree(mddev); + kobject_unregister(&mddev->kobj); } spin_unlock(&all_mddevs_lock); } @@ -1551,6 +1551,85 @@ static void analyze_sbs(mddev_t * mddev) } +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(mddev_t *, char *); + ssize_t (*store)(mddev_t *, const char *, size_t); +}; + +static ssize_t +md_show_level(mddev_t *mddev, char *page) +{ + mdk_personality_t *p = mddev->pers; + if (p == NULL) + return 0; + if (mddev->level >= 0) + return sprintf(page, "RAID-%d\n", mddev->level); + else + return sprintf(page, "%s\n", p->name); +} + +static struct md_sysfs_entry md_level = { + .attr = {.name = "level", .mode = S_IRUGO }, + .show = md_show_level, }; + +static ssize_t +md_show_rdisks(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d\n", mddev->raid_disks); +} + +static struct md_sysfs_entry md_raid_disks = { + .attr = {.name = "raid_disks", .mode = S_IRUGO }, + .show = md_show_rdisks, +}; + +static struct attribute *md_default_attrs[] = { + &md_level.attr, + &md_raid_disks.attr, + NULL, +}; + +static ssize_t +md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct md_sysfs_entry *entry = container_of(attr, struct 
md_sysfs_entry, attr); + mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + + if (!entry->show) + return -EIO; + return entry->show(mddev, page); +} + +static ssize_t +md_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); + mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + + if (!entry->store) + return -EIO; + return entry->store(mddev, page, length); +} + +static void md_free(struct kobject *ko) +{ + mddev_t *mddev = container_of(ko, mddev_t, kobj); + kfree(mddev); +} + +static struct sysfs_ops md_sysfs_ops = { + .show = md_attr_show, + .store = md_attr_store, +}; +static struct kobj_type md_ktype = { + .release = md_free, + .sysfs_ops = &md_sysfs_ops, + .default_attrs = md_default_attrs, +}; + int mdp_major = 0; static struct kobject *md_probe(dev_t dev, int *part, void *data) @@ -1592,6 +1671,11 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) add_disk(disk); mddev->gendisk = disk; up(&disks_sem); + mddev->kobj.parent = kobject_get(&disk->kobj); + mddev->kobj.k_name = NULL; + snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); + mddev->kobj.ktype = &md_ktype; + kobject_register(&mddev->kobj); return NULL; } diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index ebce949b1443..a9b0e47a3d04 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -148,6 +148,8 @@ struct mddev_s struct gendisk *gendisk; + struct kobject kobj; + /* Superblock information */ int major_version, minor_version, -- cgit v1.2.3 From 86e6ffdd243a06663713e637ee683fb27dce8e0c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:24 -0800 Subject: [PATCH] md: extend md sysfs support to component devices. Each device in an md array now has a corresponding /sys/block/mdX/md/devNN/ directory which can contain attributes. Currently there are only 'state', which summarises the state, 'super', which has a copy of the superblock, and 'block', which is a symlink to the block device. Also, /sys/block/mdX/md/rdNN represents slot 'NN' in the array, and is a symlink to the relevant 'devNN'. Obviously spare devices do not have a slot in the array, and so don't have such a symlink. 
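For illustration, a minimal userspace sketch (not part of this patch; the array name md0 and the component directory dev0 are assumptions) that reads the new attributes through the files described above:

    #include <stdio.h>

    /* Read one sysfs attribute file and print its first line. */
    static void show(const char *path)
    {
        char buf[256];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("%-35s %s", path, buf);
        fclose(f);
    }

    int main(void)
    {
        show("/sys/block/md0/md/level");       /* e.g. "RAID-5" */
        show("/sys/block/md0/md/raid_disks");  /* e.g. "4" */
        show("/sys/block/md0/md/dev0/state");  /* e.g. "in_sync" */
        return 0;
    }
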
Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 168 +++++++++++++++++++++++++++++++++++++++++++--- include/linux/raid/md_k.h | 2 + 2 files changed, 162 insertions(+), 8 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/md.c b/drivers/md/md.c index a68ad8547325..74520b50c307 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -711,6 +711,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) */ int i; int active=0, working=0,failed=0,spare=0,nr_disks=0; + unsigned int fixdesc=0; rdev->sb_size = MD_SB_BYTES; @@ -758,16 +759,28 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->disks[0].state = (1<raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) - rdev2->desc_nr = rdev2->raid_disk; + desc_nr = rdev2->raid_disk; else - rdev2->desc_nr = next_spare++; + desc_nr = next_spare++; + if (desc_nr != rdev2->desc_nr) { + fixdesc |= (1 << desc_nr); + rdev2->desc_nr = desc_nr; + if (rdev2->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev2->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + } + sysfs_remove_link(&rdev2->kobj, "block"); + kobject_del(&rdev2->kobj); + } d = &sb->disks[rdev2->desc_nr]; nr_disks++; d->number = rdev2->desc_nr; d->major = MAJOR(rdev2->bdev->bd_dev); d->minor = MINOR(rdev2->bdev->bd_dev); - if (rdev2->raid_disk >= 0 && rdev->in_sync && !rdev2->faulty) + if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) d->raid_disk = rdev2->raid_disk; else d->raid_disk = rdev2->desc_nr; /* compatibility */ @@ -787,7 +800,22 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) if (test_bit(WriteMostly, &rdev2->flags)) d->state |= (1<desc_nr)) { + snprintf(rdev2->kobj.name, KOBJ_NAME_LEN, "dev%d", + rdev2->desc_nr); + kobject_add(&rdev2->kobj); + sysfs_create_link(&rdev2->kobj, + &rdev2->bdev->bd_disk->kobj, + "block"); + if (rdev2->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev2->raid_disk); + sysfs_create_link(&mddev->kobj, + &rdev2->kobj, nm); + } + } /* now set the "removed" and "faulty" bits on any missing devices */ for (i=0 ; i < mddev->raid_disks ; i++) { mdp_disk_t *d = &sb->disks[i]; @@ -1147,6 +1175,13 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) list_add(&rdev->same_set, &mddev->disks); rdev->mddev = mddev; printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b)); + + rdev->kobj.k_name = NULL; + snprintf(rdev->kobj.name, KOBJ_NAME_LEN, "dev%d", rdev->desc_nr); + rdev->kobj.parent = kobject_get(&mddev->kobj); + kobject_add(&rdev->kobj); + + sysfs_create_link(&rdev->kobj, &rdev->bdev->bd_disk->kobj, "block"); return 0; } @@ -1160,6 +1195,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) list_del_init(&rdev->same_set); printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); rdev->mddev = NULL; + sysfs_remove_link(&rdev->kobj, "block"); + kobject_del(&rdev->kobj); } /* @@ -1215,7 +1252,7 @@ static void export_rdev(mdk_rdev_t * rdev) md_autodetect_dev(rdev->bdev->bd_dev); #endif unlock_rdev(rdev); - kfree(rdev); + kobject_put(&rdev->kobj); } static void kick_rdev_from_array(mdk_rdev_t * rdev) @@ -1414,6 +1451,94 @@ repeat: } +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(mdk_rdev_t *, char *); + ssize_t (*store)(mdk_rdev_t *, const char *, size_t); +}; + +static ssize_t +rdev_show_state(mdk_rdev_t *rdev, char *page) +{ + char *sep = ""; + int len=0; + + if (rdev->faulty) { + len+= sprintf(page+len, "%sfaulty",sep); + sep = ","; + } + if (rdev->in_sync) { + len 
+= sprintf(page+len, "%sin_sync",sep); + sep = ","; + } + if (!rdev->faulty && !rdev->in_sync) { + len += sprintf(page+len, "%sspare", sep); + sep = ","; + } + return len+sprintf(page+len, "\n"); +} + +static struct rdev_sysfs_entry rdev_state = { + .attr = {.name = "state", .mode = S_IRUGO }, + .show = rdev_show_state, +}; + +static ssize_t +rdev_show_super(mdk_rdev_t *rdev, char *page) +{ + if (rdev->sb_loaded && rdev->sb_size) { + memcpy(page, page_address(rdev->sb_page), rdev->sb_size); + return rdev->sb_size; + } else + return 0; +} +static struct rdev_sysfs_entry rdev_super = { + .attr = {.name = "super", .mode = S_IRUGO }, + .show = rdev_show_super, +}; +static struct attribute *rdev_default_attrs[] = { + &rdev_state.attr, + &rdev_super.attr, + NULL, +}; +static ssize_t +rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); + mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); + + if (!entry->show) + return -EIO; + return entry->show(rdev, page); +} + +static ssize_t +rdev_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); + mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); + + if (!entry->store) + return -EIO; + return entry->store(rdev, page, length); +} + +static void rdev_free(struct kobject *ko) +{ + mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); + kfree(rdev); +} +static struct sysfs_ops rdev_sysfs_ops = { + .show = rdev_attr_show, + .store = rdev_attr_store, +}; +static struct kobj_type rdev_ktype = { + .release = rdev_free, + .sysfs_ops = &rdev_sysfs_ops, + .default_attrs = rdev_default_attrs, +}; + /* * Import a device. If 'super_format' >= 0, then sanity check the superblock * @@ -1445,6 +1570,10 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi if (err) goto abort_free; + rdev->kobj.parent = NULL; + rdev->kobj.ktype = &rdev_ktype; + kobject_init(&rdev->kobj); + rdev->desc_nr = -1; rdev->faulty = 0; rdev->in_sync = 0; @@ -1820,6 +1949,13 @@ static int do_md_run(mddev_t * mddev) mddev->safemode_timer.data = (unsigned long) mddev; mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ mddev->in_sync = 1; + + ITERATE_RDEV(mddev,rdev,tmp) + if (rdev->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); + } set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); @@ -1941,9 +2077,18 @@ static int do_md_stop(mddev_t * mddev, int ro) * Free resources if final stop */ if (!ro) { + mdk_rdev_t *rdev; + struct list_head *tmp; struct gendisk *disk; printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); + ITERATE_RDEV(mddev,rdev,tmp) + if (rdev->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + } + export_array(mddev); mddev->array_size = 0; @@ -3962,17 +4107,24 @@ void md_check_recovery(mddev_t *mddev) if (rdev->raid_disk >= 0 && (rdev->faulty || ! 
rdev->in_sync) && atomic_read(&rdev->nr_pending)==0) { - if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) + if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) { + char nm[20]; + sprintf(nm,"rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); rdev->raid_disk = -1; + } } if (mddev->degraded) { ITERATE_RDEV(mddev,rdev,rtmp) if (rdev->raid_disk < 0 && !rdev->faulty) { - if (mddev->pers->hot_add_disk(mddev,rdev)) + if (mddev->pers->hot_add_disk(mddev,rdev)) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); spares++; - else + } else break; } } diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index a9b0e47a3d04..d1dad32ebe07 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -105,6 +105,8 @@ struct mdk_rdev_s int sb_size; /* bytes in the superblock */ int preferred_minor; /* autorun support */ + struct kobject kobj; + /* A device can be in one of three states based on two flags: * Not working: faulty==1 in_sync==0 * Fully working: faulty==0 in_sync==1 -- cgit v1.2.3 From 3f294f4fb6f2ba887b717674da26c21f3d57f3fc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:25 -0800 Subject: [PATCH] md: add kobject/sysfs support to raid5 /sys/block/mdX/md/raid5/ contains raid5-related attributes. Currently stripe_cache_size is the number of entries in the stripe cache, and is settable. stripe_cache_active is the number of active entries, and is only readable. Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 183 +++++++++++++++++++++++++++++++++++++-------- include/linux/raid/raid5.h | 1 + 2 files changed, 152 insertions(+), 32 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8cf1ae8b8a71..121fbaa9ed59 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -293,9 +293,31 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector return sh; } -static int grow_stripes(raid5_conf_t *conf, int num) +static int grow_one_stripe(raid5_conf_t *conf) { struct stripe_head *sh; + sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); + if (!sh) + return 0; + memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); + sh->raid_conf = conf; + spin_lock_init(&sh->lock); + + if (grow_buffers(sh, conf->raid_disks)) { + shrink_buffers(sh, conf->raid_disks); + kmem_cache_free(conf->slab_cache, sh); + return 0; + } + /* we just created an active stripe so... */ + atomic_set(&sh->count, 1); + atomic_inc(&conf->active_stripes); + INIT_LIST_HEAD(&sh->lru); + release_stripe(sh); + return 1; +} + +static int grow_stripes(raid5_conf_t *conf, int num) { kmem_cache_t *sc; int devs = conf->raid_disks; @@ -308,43 +330,34 @@ static int grow_stripes(raid5_conf_t *conf, int num) return 1; conf->slab_cache = sc; while (num--) { - sh = kmem_cache_alloc(sc, GFP_KERNEL); - if (!sh) - return 1; - memset(sh, 0, sizeof(*sh) + (devs-1)*sizeof(struct r5dev)); - sh->raid_conf = conf; - spin_lock_init(&sh->lock); - - if (grow_buffers(sh, conf->raid_disks)) { - shrink_buffers(sh, conf->raid_disks); - kmem_cache_free(sc, sh); + if (!grow_one_stripe(conf)) return 1; - } - /* we just created an active stripe so... 
*/ - atomic_set(&sh->count, 1); - atomic_inc(&conf->active_stripes); - INIT_LIST_HEAD(&sh->lru); - release_stripe(sh); } return 0; } -static void shrink_stripes(raid5_conf_t *conf) +static int drop_one_stripe(raid5_conf_t *conf) { struct stripe_head *sh; - while (1) { - spin_lock_irq(&conf->device_lock); - sh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); - if (!sh) - break; - if (atomic_read(&sh->count)) - BUG(); - shrink_buffers(sh, conf->raid_disks); - kmem_cache_free(conf->slab_cache, sh); - atomic_dec(&conf->active_stripes); - } + spin_lock_irq(&conf->device_lock); + sh = get_free_stripe(conf); + spin_unlock_irq(&conf->device_lock); + if (!sh) + return 0; + if (atomic_read(&sh->count)) + BUG(); + shrink_buffers(sh, conf->raid_disks); + kmem_cache_free(conf->slab_cache, sh); + atomic_dec(&conf->active_stripes); + return 1; +} + +static void shrink_stripes(raid5_conf_t *conf) +{ + while (drop_one_stripe(conf)) + ; + kmem_cache_destroy(conf->slab_cache); conf->slab_cache = NULL; } @@ -1714,6 +1727,108 @@ static void raid5d (mddev_t *mddev) PRINTK("--- raid5d inactive\n"); } +struct raid5_sysfs_entry { + struct attribute attr; + ssize_t (*show)(raid5_conf_t *, char *); + ssize_t (*store)(raid5_conf_t *, const char *, ssize_t); +}; + +static ssize_t +raid5_show_stripe_cache_size(raid5_conf_t *conf, char *page) +{ + return sprintf(page, "%d\n", conf->max_nr_stripes); +} + +static ssize_t +raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len) +{ + char *end; + int new; + if (len >= PAGE_SIZE) + return -EINVAL; + + new = simple_strtoul(page, &end, 10); + if (!*page || (*end && *end != '\n') ) + return -EINVAL; + if (new <= 16 || new > 32768) + return -EINVAL; + while (new < conf->max_nr_stripes) { + if (drop_one_stripe(conf)) + conf->max_nr_stripes--; + else + break; + } + while (new > conf->max_nr_stripes) { + if (grow_one_stripe(conf)) + conf->max_nr_stripes++; + else break; + } + return len; +} +static struct raid5_sysfs_entry raid5_stripecache_size = { + .attr = {.name = "stripe_cache_size", .mode = S_IRUGO | S_IWUSR }, + .show = raid5_show_stripe_cache_size, + .store = raid5_store_stripe_cache_size, +}; + +static ssize_t +raid5_show_stripe_cache_active(raid5_conf_t *conf, char *page) +{ + return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); +} + +static struct raid5_sysfs_entry raid5_stripecache_active = { + .attr = {.name = "stripe_cache_active", .mode = S_IRUGO}, + .show = raid5_show_stripe_cache_active, +}; + +static struct attribute *raid5_default_attrs[] = { + &raid5_stripecache_size.attr, + &raid5_stripecache_active.attr, + NULL, +}; + +static ssize_t +raid5_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr); + raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); + + if (!entry->show) + return -EIO; + return entry->show(conf, page); +} + +static ssize_t +raid5_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr); + raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); + + if (!entry->store) + return -EIO; + return entry->store(conf, page, length); +} + +static void raid5_free(struct kobject *ko) +{ + raid5_conf_t *conf = container_of(ko, raid5_conf_t, kobj); + kfree(conf); +} + + +static struct sysfs_ops raid5_sysfs_ops = { + .show = raid5_attr_show, + .store = raid5_attr_store, 
+}; + +static struct kobj_type raid5_ktype = { + .release = raid5_free, + .sysfs_ops = &raid5_sysfs_ops, + .default_attrs = raid5_default_attrs, +}; + static int run(mddev_t *mddev) { raid5_conf_t *conf; @@ -1855,6 +1970,10 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + } /* Ok, everything is just fine now */ + conf->kobj.parent = kobject_get(&mddev->kobj); + strcpy(conf->kobj.name, "raid5"); + conf->kobj.ktype = &raid5_ktype; + kobject_register(&conf->kobj); if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; @@ -1879,7 +1998,7 @@ abort: -static int stop (mddev_t *mddev) +static int stop(mddev_t *mddev) { raid5_conf_t *conf = (raid5_conf_t *) mddev->private; @@ -1888,7 +2007,7 @@ static int stop (mddev_t *mddev) shrink_stripes(conf); free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - kfree(conf); + kobject_unregister(&conf->kobj); mddev->private = NULL; return 0; } diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index f025ba6fb14c..5f4e945c8083 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -228,6 +228,7 @@ struct raid5_private_data { * Cleared when a sync completes. */ + struct kobject kobj; /* * Free stripes pool */ -- cgit v1.2.3 From 24dd469d728dae07f40c5d79ea6dedd38cdf1a30 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:26 -0800 Subject: [PATCH] md: allow a manual resync with md You can trigger a 'check' with echo check > /sys/block/mdX/md/scan_mode or a check-and-repair of errors with echo repair > /sys/block/mdX/md/scan_mode and read the current state from the same file. Note: personalities need to know the difference between 'check' and 'repair', but don't yet. Until they do, 'check' will be the same as 'repair' and will just do a normal resync pass. 
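As an illustration only (md0 is an assumed array name), the same request can be made from a small C program rather than the shell redirection shown above:

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/block/md0/md/scan_mode";
        char mode[32];
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return 1;
        }
        fputs("check\n", f);   /* "repair" would also correct any differences found */
        fclose(f);

        f = fopen(path, "r");  /* read the current state back from the same file */
        if (f) {
            if (fgets(mode, sizeof(mode), f))
                printf("scan_mode: %s", mode);
            fclose(f);
        }
        return 0;
    }
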
Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 77 +++++++++++++++++++++++++++++++++++++++++------ include/linux/raid/md_k.h | 4 +++ 2 files changed, 72 insertions(+), 9 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/md.c b/drivers/md/md.c index 74520b50c307..37400873b879 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1714,9 +1714,60 @@ static struct md_sysfs_entry md_raid_disks = { .show = md_show_rdisks, }; +static ssize_t +md_show_scan(mddev_t *mddev, char *page) +{ + char *type = "none"; + if (mddev->recovery & + ((1<recovery & (1<recovery)) + type = "resync"; + else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + type = "check"; + else + type = "repair"; + } else + type = "recover"; + } + return sprintf(page, "%s\n", type); +} + +static ssize_t +md_store_scan(mddev_t *mddev, const char *page, size_t len) +{ + int canscan=0; + if (mddev->recovery & + ((1<reconfig_sem); + if (mddev->pers && mddev->pers->sync_request) + canscan=1; + up(&mddev->reconfig_sem); + if (!canscan) + return -EINVAL; + + if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) + set_bit(MD_RECOVERY_CHECK, &mddev->recovery); + else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) + return -EINVAL; + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + return len; +} + +static struct md_sysfs_entry md_scan_mode = { + .attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR }, + .show = md_show_scan, + .store = md_store_scan, +}; + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, + &md_scan_mode.attr, NULL, }; @@ -3855,7 +3906,8 @@ static void md_do_sync(mddev_t *mddev) is_mddev_idle(mddev); /* this also initializes IO event counters */ /* we don't use the checkpoint if there's a bitmap */ - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap) + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap + && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) j = mddev->recovery_cp; else j = 0; @@ -4093,9 +4145,13 @@ void md_check_recovery(mddev_t *mddev) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); goto unlock; } - if (mddev->recovery) - /* probably just the RECOVERY_NEEDED flag */ - mddev->recovery = 0; + /* Clear some bits that don't mean anything, but + * might be left set + */ + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_RECOVERY_ERR, &mddev->recovery); + clear_bit(MD_RECOVERY_INTR, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); /* no recovery is running. * remove any failed drives, then @@ -4129,14 +4185,17 @@ void md_check_recovery(mddev_t *mddev) } } - if (!spares && (mddev->recovery_cp == MaxSector )) { - /* nothing we can do ... */ + if (spares) { + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + } else if (mddev->recovery_cp < MaxSector) { + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + /* nothing to be done ... */ goto unlock; - } + if (mddev->pers->sync_request) { set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - if (!spares) - set_bit(MD_RECOVERY_SYNC, &mddev->recovery); if (spares && mddev->bitmap && ! mddev->bitmap->file) { /* We are adding a device or devices to an array * which has the bitmap stored on all devices. 
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index d1dad32ebe07..efd04dca0abd 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -182,6 +182,8 @@ struct mddev_s * ERR: and IO error was detected - abort the resync/recovery * INTR: someone requested a (clean) early abort. * DONE: thread is done and is waiting to be reaped + * REQUEST: user-space has requested a sync (used with SYNC) + * CHECK: user-space request for for check-only, no repair */ #define MD_RECOVERY_RUNNING 0 #define MD_RECOVERY_SYNC 1 @@ -189,6 +191,8 @@ struct mddev_s #define MD_RECOVERY_INTR 3 #define MD_RECOVERY_DONE 4 #define MD_RECOVERY_NEEDED 5 +#define MD_RECOVERY_REQUESTED 6 +#define MD_RECOVERY_CHECK 7 unsigned long recovery; int in_sync; /* know to not need resync */ -- cgit v1.2.3 From 9d88883e68f404d5581bd391713ceef470ea53a9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:26 -0800 Subject: [PATCH] md: teach raid5 the difference between 'check' and 'repair'. With this, raid5 can be asked to check parity without repairing it. It also keeps a count of the number of incorrect parity blocks found (mismatches) and reports them through sysfs. Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 18 ++++++++++++++++-- drivers/md/raid5.c | 5 +++++ include/linux/raid/md_k.h | 4 ++++ 3 files changed, 25 insertions(+), 2 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/md.c b/drivers/md/md.c index 37400873b879..e58d61d9f31b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1758,16 +1758,29 @@ md_store_scan(mddev_t *mddev, const char *page, size_t len) return len; } +static ssize_t +md_show_mismatch(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", + (unsigned long long) mddev->resync_mismatches); +} + static struct md_sysfs_entry md_scan_mode = { .attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR }, .show = md_show_scan, .store = md_store_scan, }; +static struct md_sysfs_entry md_mismatches = { + .attr = {.name = "mismatch_cnt", .mode = S_IRUGO }, + .show = md_show_mismatch, +}; + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, &md_scan_mode.attr, + &md_mismatches.attr, NULL, }; @@ -3888,12 +3901,13 @@ static void md_do_sync(mddev_t *mddev) } } while (mddev->curr_resync < 2); - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { /* resync follows the size requested by the personality, * which defaults to physical size, but can be virtual size */ max_sectors = mddev->resync_max_sectors; - else + mddev->resync_mismatches = 0; + } else /* recovery follows the physical size of devices */ max_sectors = mddev->size << 1; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 121fbaa9ed59..ce154553aca5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1292,6 +1292,11 @@ static void handle_stripe(struct stripe_head *sh) !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) { /* parity is correct (on disc, not in buffer any more) */ set_bit(STRIPE_INSYNC, &sh->state); + } else { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! 
*/ + set_bit(STRIPE_INSYNC, &sh->state); } } if (!test_bit(STRIPE_INSYNC, &sh->state)) { diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index efd04dca0abd..cb8b44d1588b 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -175,6 +175,10 @@ struct mddev_s sector_t resync_mark_cnt;/* blocks written at resync_mark */ sector_t resync_max_sectors; /* may be set by personality */ + + sector_t resync_mismatches; /* count of sectors where + * parity/replica mismatch found + */ /* recovery/resync flags * NEEDED: we might need to start a resync/recover * RUNNING: a thread is running, or about to be started -- cgit v1.2.3 From 007583c9253fed363a0bd71b039e9b40a0f6855e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:30 -0800 Subject: [PATCH] md: change raid5 sysfs attribute to not create a new directory There isn't really a need for raid5 attributes to be in a subdirectory, so this patch moves them from /sys/block/mdX/md/raid5/attribute to /sys/block/mdX/md/attribute. This suggests that all md personalities should co-operate about namespace usage, but that shouldn't be a problem. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 6 ---- drivers/md/raid5.c | 72 ++++++++++------------------------------ include/linux/raid/md_k.h | 7 +++++ include/linux/raid/raid5.h | 1 - 4 files changed, 22 insertions(+), 64 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/md.c b/drivers/md/md.c index 013f2f27589c..3db5c3513072 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1683,12 +1683,6 @@ static void analyze_sbs(mddev_t * mddev) } -struct md_sysfs_entry { - struct attribute attr; - ssize_t (*show)(mddev_t *, char *); - ssize_t (*store)(mddev_t *, const char *, size_t); -}; - static ssize_t md_show_level(mddev_t *mddev, char *page) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 246c9b1cc4a3..08a1620b9f8c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1732,21 +1732,17 @@ static void raid5d (mddev_t *mddev) PRINTK("--- raid5d inactive\n"); } -struct raid5_sysfs_entry { - struct attribute attr; - ssize_t (*show)(raid5_conf_t *, char *); - ssize_t (*store)(raid5_conf_t *, const char *, ssize_t); -}; - static ssize_t -raid5_show_stripe_cache_size(raid5_conf_t *conf, char *page) +raid5_show_stripe_cache_size(mddev_t *mddev, char *page) { + raid5_conf_t *conf = mddev_to_conf(mddev); return sprintf(page, "%d\n", conf->max_nr_stripes); } static ssize_t -raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len) +raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) { + raid5_conf_t *conf = mddev_to_conf(mddev); char *end; int new; if (len >= PAGE_SIZE) @@ -1770,68 +1766,33 @@ raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len) } return len; } -static struct raid5_sysfs_entry raid5_stripecache_size = { + +static struct md_sysfs_entry raid5_stripecache_size = { .attr = {.name = "stripe_cache_size", .mode = S_IRUGO | S_IWUSR }, .show = raid5_show_stripe_cache_size, .store = raid5_store_stripe_cache_size, }; static ssize_t -raid5_show_stripe_cache_active(raid5_conf_t *conf, char *page) +raid5_show_stripe_cache_active(mddev_t *mddev, char *page) { + raid5_conf_t *conf = mddev_to_conf(mddev); return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); } -static struct raid5_sysfs_entry raid5_stripecache_active = { +static struct md_sysfs_entry raid5_stripecache_active = { .attr 
= {.name = "stripe_cache_active", .mode = S_IRUGO}, .show = raid5_show_stripe_cache_active, }; -static struct attribute *raid5_default_attrs[] = { +static struct attribute *raid5_attrs[] = { &raid5_stripecache_size.attr, &raid5_stripecache_active.attr, NULL, }; - -static ssize_t -raid5_attr_show(struct kobject *kobj, struct attribute *attr, char *page) -{ - struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr); - raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); - - if (!entry->show) - return -EIO; - return entry->show(conf, page); -} - -static ssize_t -raid5_attr_store(struct kobject *kobj, struct attribute *attr, - const char *page, size_t length) -{ - struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr); - raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); - - if (!entry->store) - return -EIO; - return entry->store(conf, page, length); -} - -static void raid5_free(struct kobject *ko) -{ - raid5_conf_t *conf = container_of(ko, raid5_conf_t, kobj); - kfree(conf); -} - - -static struct sysfs_ops raid5_sysfs_ops = { - .show = raid5_attr_show, - .store = raid5_attr_store, -}; - -static struct kobj_type raid5_ktype = { - .release = raid5_free, - .sysfs_ops = &raid5_sysfs_ops, - .default_attrs = raid5_default_attrs, +static struct attribute_group raid5_attrs_group = { + .name = NULL, + .attrs = raid5_attrs, }; static int run(mddev_t *mddev) @@ -1975,10 +1936,7 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + } /* Ok, everything is just fine now */ - conf->kobj.parent = &mddev->kobj; - strcpy(conf->kobj.name, "raid5"); - conf->kobj.ktype = &raid5_ktype; - kobject_register(&conf->kobj); + sysfs_create_group(&mddev->kobj, &raid5_attrs_group); if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; @@ -2012,7 +1970,7 @@ static int stop(mddev_t *mddev) shrink_stripes(conf); free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - kobject_unregister(&conf->kobj); + sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); mddev->private = NULL; return 0; } diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index cb8b44d1588b..4169c11e5451 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -282,6 +282,13 @@ struct mdk_personality_s }; +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(mddev_t *, char *); + ssize_t (*store)(mddev_t *, const char *, size_t); +}; + + static inline char * mdname (mddev_t * mddev) { return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; } diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 5f4e945c8083..f025ba6fb14c 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -228,7 +228,6 @@ struct raid5_private_data { * Cleared when a sync completes. */ - struct kobject kobj; /* * Free stripes pool */ -- cgit v1.2.3 From ba22dcbf106338a5c46d6979f9b19564faae3d49 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:31 -0800 Subject: [PATCH] md: improvements to raid5 handling of read errors Two refinements to the 'attempt-overwrite-on-read-error' mechanism. 1/ If the array is read-only, don't attempt an over-write. 2/ If there are more than max_nr_stripes read errors on a device with no success, fail the drive. This will make sure a dead drive will eventually be kicked even when we aren't trying to rewrite (which would normally kick a dead drive more quickly). 
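The combined behaviour can be summarised roughly as the following standalone sketch; the names and the flattened ordering are illustrative, not the kernel code itself:

    enum rerr_action { ATTEMPT_REWRITE, FAIL_DEVICE, LEAVE_ALONE };

    /* consecutive_errors is assumed to be reset whenever a read from the
     * device succeeds, mirroring the per-rdev counter added below. */
    static enum rerr_action on_read_error(int array_read_only, int array_degraded,
                                          int rewrite_already_tried,
                                          int consecutive_errors, int max_nr_stripes)
    {
        if (array_degraded)                      /* no redundancy left to rebuild the block from */
            return FAIL_DEVICE;
        if (rewrite_already_tried)               /* over-write plus re-read still failed */
            return FAIL_DEVICE;
        if (consecutive_errors > max_nr_stripes) /* refinement 2: too many errors with no success */
            return FAIL_DEVICE;
        if (array_read_only)                     /* refinement 1: never over-write on a read-only array */
            return LEAVE_ALONE;
        return ATTEMPT_REWRITE;                  /* reconstruct the data, re-write it, then re-read */
    }
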
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 1 + drivers/md/raid5.c | 25 +++++++++++++++++-------- include/linux/raid/md_k.h | 3 +++ 3 files changed, 21 insertions(+), 8 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/md.c b/drivers/md/md.c index 3db5c3513072..3fb80397f8aa 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1582,6 +1582,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi rdev->in_sync = 0; rdev->data_offset = 0; atomic_set(&rdev->nr_pending, 0); + atomic_set(&rdev->read_errors, 0); size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; if (!size) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 08a1620b9f8c..77610b98d4e0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -420,21 +420,29 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } + if (atomic_read(&conf->disks[i].rdev->read_errors)) + atomic_set(&conf->disks[i].rdev->read_errors, 0); } else { + int retry = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); - if (conf->mddev->degraded) { + atomic_inc(&conf->disks[i].rdev->read_errors); + if (conf->mddev->degraded) printk("R5: read error not correctable.\n"); - clear_bit(R5_ReadError, &sh->dev[i].flags); - clear_bit(R5_ReWrite, &sh->dev[i].flags); - md_error(conf->mddev, conf->disks[i].rdev); - } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { + else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) /* Oh, no!!! */ printk("R5: read error NOT corrected!!\n"); + else if (atomic_read(&conf->disks[i].rdev->read_errors) + > conf->max_nr_stripes) + printk("raid5: Too many read errors, failing device.\n"); + else + retry = 1; + if (retry) + set_bit(R5_ReadError, &sh->dev[i].flags); + else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); md_error(conf->mddev, conf->disks[i].rdev); - } else - set_bit(R5_ReadError, &sh->dev[i].flags); + } } rdev_dec_pending(conf->disks[i].rdev, conf->mddev); #if 0 @@ -1328,7 +1336,8 @@ static void handle_stripe(struct stripe_head *sh) /* If the failed drive is just a ReadError, then we might need to progress * the repair/check process */ - if (failed == 1 && test_bit(R5_ReadError, &sh->dev[failed_num].flags) + if (failed == 1 && ! conf->mddev->ro && + test_bit(R5_ReadError, &sh->dev[failed_num].flags) && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags) && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags) ) { diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 4169c11e5451..200c69e34fc0 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -134,6 +134,9 @@ struct mdk_rdev_s * only maintained for arrays that * support hot removal */ + atomic_t read_errors; /* number of consecutive read errors that + * we have tried to ignore. + */ }; typedef struct mdk_personality_s mdk_personality_t; -- cgit v1.2.3 From b2d444d7ad975d555bb919601bcdc0e58975a40e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:31 -0800 Subject: [PATCH] md: convert 'faulty' and 'in_sync' fields to bits in 'flags' field This has the advantage of removing the confusion caused by 'rdev_t' and 'mddev_t' both having 'in_sync' fields. 
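The pattern, shown here with userspace stand-ins for the kernel's test_bit/set_bit/clear_bit helpers rather than the real API, is to fold the two booleans into bit numbers within a single flags word:

    #include <stdio.h>

    enum { Faulty, In_sync, WriteMostly };  /* bit numbers within the flags word */

    static int  test_flag(const unsigned long *flags, int bit) { return (*flags >> bit) & 1UL; }
    static void set_flag(unsigned long *flags, int bit)        { *flags |=  1UL << bit; }
    static void clear_flag(unsigned long *flags, int bit)      { *flags &= ~(1UL << bit); }

    int main(void)
    {
        unsigned long flags = 0;  /* replaces the separate 'faulty' and 'in_sync' ints */

        set_flag(&flags, In_sync);
        if (!test_flag(&flags, Faulty) && test_flag(&flags, In_sync))
            printf("fully working\n");

        clear_flag(&flags, In_sync);
        set_flag(&flags, Faulty);
        if (test_flag(&flags, Faulty))
            printf("not working\n");
        return 0;
    }
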
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 6 ++-- drivers/md/md.c | 92 +++++++++++++++++++++++------------------------ drivers/md/multipath.c | 23 ++++++------ drivers/md/raid1.c | 52 +++++++++++++-------------- drivers/md/raid10.c | 41 ++++++++++----------- drivers/md/raid5.c | 36 +++++++++---------- drivers/md/raid6main.c | 32 ++++++++--------- include/linux/raid/md_k.h | 8 ++--- 8 files changed, 146 insertions(+), 144 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e59694bc5758..c5fa4c2a5af1 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -271,7 +271,8 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde return ERR_PTR(-ENOMEM); ITERATE_RDEV(mddev, rdev, tmp) { - if (! rdev->in_sync || rdev->faulty) + if (! test_bit(In_sync, &rdev->flags) + || test_bit(Faulty, &rdev->flags)) continue; target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512); @@ -291,7 +292,8 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai struct list_head *tmp; ITERATE_RDEV(mddev, rdev, tmp) - if (rdev->in_sync && !rdev->faulty) + if (test_bit(In_sync, &rdev->flags) + && !test_bit(Faulty, &rdev->flags)) md_super_write(mddev, rdev, (rdev->sb_offset<<1) + offset + page->index * (PAGE_SIZE/512), diff --git a/drivers/md/md.c b/drivers/md/md.c index 3fb80397f8aa..9dfa063d1c6a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -610,7 +610,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); rdev->raid_disk = -1; - rdev->in_sync = 0; + rdev->flags = 0; if (mddev->raid_disks == 0) { mddev->major_version = 0; mddev->minor_version = sb->minor_version; @@ -671,21 +671,19 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) return 0; if (mddev->level != LEVEL_MULTIPATH) { - rdev->faulty = 0; - rdev->flags = 0; desc = sb->disks + rdev->desc_nr; if (desc->state & (1<faulty = 1; + set_bit(Faulty, &rdev->flags); else if (desc->state & (1<raid_disk < mddev->raid_disks) { - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); rdev->raid_disk = desc->raid_disk; } if (desc->state & (1<flags); } else /* MULTIPATH are always insync */ - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); return 0; } @@ -761,7 +759,8 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) ITERATE_RDEV(mddev,rdev2,tmp) { mdp_disk_t *d; int desc_nr; - if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) + if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) + && !test_bit(Faulty, &rdev2->flags)) desc_nr = rdev2->raid_disk; else desc_nr = next_spare++; @@ -780,14 +779,15 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) d->number = rdev2->desc_nr; d->major = MAJOR(rdev2->bdev->bd_dev); d->minor = MINOR(rdev2->bdev->bd_dev); - if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) + if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) + && !test_bit(Faulty, &rdev2->flags)) d->raid_disk = rdev2->raid_disk; else d->raid_disk = rdev2->desc_nr; /* compatibility */ - if (rdev2->faulty) { + if (test_bit(Faulty, &rdev2->flags)) { d->state = (1<in_sync) { + } else if (test_bit(In_sync, &rdev2->flags)) { d->state = (1<state |= (1<sb_page); rdev->raid_disk = -1; - rdev->in_sync = 0; + rdev->flags = 0; if (mddev->raid_disks == 0) { mddev->major_version = 1; mddev->patch_version = 0; @@ -1027,22 +1027,19 @@ 
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); switch(role) { case 0xffff: /* spare */ - rdev->faulty = 0; break; case 0xfffe: /* faulty */ - rdev->faulty = 1; + set_bit(Faulty, &rdev->flags); break; default: - rdev->in_sync = 1; - rdev->faulty = 0; + set_bit(In_sync, &rdev->flags); rdev->raid_disk = role; break; } - rdev->flags = 0; if (sb->devflags & WriteMostly1) set_bit(WriteMostly, &rdev->flags); } else /* MULTIPATH are always insync */ - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); return 0; } @@ -1086,9 +1083,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) ITERATE_RDEV(mddev,rdev2,tmp) { i = rdev2->desc_nr; - if (rdev2->faulty) + if (test_bit(Faulty, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(0xfffe); - else if (rdev2->in_sync) + else if (test_bit(In_sync, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else sb->dev_roles[i] = cpu_to_le16(0xffff); @@ -1327,7 +1324,8 @@ static void print_rdev(mdk_rdev_t *rdev) char b[BDEVNAME_SIZE]; printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", bdevname(rdev->bdev,b), (unsigned long long)rdev->size, - rdev->faulty, rdev->in_sync, rdev->desc_nr); + test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), + rdev->desc_nr); if (rdev->sb_loaded) { printk(KERN_INFO "md: rdev superblock:\n"); print_sb((mdp_super_t*)page_address(rdev->sb_page)); @@ -1421,11 +1419,11 @@ repeat: ITERATE_RDEV(mddev,rdev,tmp) { char b[BDEVNAME_SIZE]; dprintk(KERN_INFO "md: "); - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) dprintk("(skipping faulty "); dprintk("%s ", bdevname(rdev->bdev,b)); - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { md_super_write(mddev,rdev, rdev->sb_offset<<1, rdev->sb_size, rdev->sb_page); @@ -1466,15 +1464,16 @@ rdev_show_state(mdk_rdev_t *rdev, char *page) char *sep = ""; int len=0; - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { len+= sprintf(page+len, "%sfaulty",sep); sep = ","; } - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { len += sprintf(page+len, "%sin_sync",sep); sep = ","; } - if (!rdev->faulty && !rdev->in_sync) { + if (!test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags)) { len += sprintf(page+len, "%sspare", sep); sep = ","; } @@ -1578,8 +1577,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi kobject_init(&rdev->kobj); rdev->desc_nr = -1; - rdev->faulty = 0; - rdev->in_sync = 0; + rdev->flags = 0; rdev->data_offset = 0; atomic_set(&rdev->nr_pending, 0); atomic_set(&rdev->read_errors, 0); @@ -1670,7 +1668,7 @@ static void analyze_sbs(mddev_t * mddev) if (mddev->level == LEVEL_MULTIPATH) { rdev->desc_nr = i++; rdev->raid_disk = rdev->desc_nr; - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); } } @@ -1939,7 +1937,7 @@ static int do_md_run(mddev_t * mddev) /* devices must have minimum size of one chunk */ ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) continue; if (rdev->size < chunk_size / 1024) { printk(KERN_WARNING @@ -1967,7 +1965,7 @@ static int do_md_run(mddev_t * mddev) * Also find largest hardsector size */ ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) continue; sync_blockdev(rdev->bdev); invalidate_bdev(rdev->bdev, 0); @@ -2304,7 +2302,7 @@ static int autostart_array(dev_t startdev) return err; } - if (start_rdev->faulty) { + if (test_bit(Faulty, &start_rdev->flags)) { printk(KERN_WARNING "md: can not 
autostart based on faulty %s!\n", bdevname(start_rdev->bdev,b)); @@ -2363,11 +2361,11 @@ static int get_array_info(mddev_t * mddev, void __user * arg) nr=working=active=failed=spare=0; ITERATE_RDEV(mddev,rdev,tmp) { nr++; - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) failed++; else { working++; - if (rdev->in_sync) + if (test_bit(In_sync, &rdev->flags)) active++; else spare++; @@ -2458,9 +2456,9 @@ static int get_disk_info(mddev_t * mddev, void __user * arg) info.minor = MINOR(rdev->bdev->bd_dev); info.raid_disk = rdev->raid_disk; info.state = 0; - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) info.state |= (1<in_sync) { + else if (test_bit(In_sync, &rdev->flags)) { info.state |= (1<saved_raid_disk = rdev->raid_disk; - rdev->in_sync = 0; /* just to be sure */ + clear_bit(In_sync, &rdev->flags); /* just to be sure */ if (info->state & (1<flags); @@ -2591,11 +2589,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) else rdev->raid_disk = -1; - rdev->faulty = 0; + rdev->flags = 0; + if (rdev->raid_disk < mddev->raid_disks) - rdev->in_sync = (info->state & (1<in_sync = 0; + if (info->state & (1<flags); if (info->state & (1<flags); @@ -2694,14 +2692,14 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) goto abort_export; } - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { printk(KERN_WARNING "md: can not hot-add faulty %s disk to %s!\n", bdevname(rdev->bdev,b), mdname(mddev)); err = -EINVAL; goto abort_export; } - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); rdev->desc_nr = -1; bind_rdev_to_array(rdev, mddev); @@ -3428,7 +3426,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) return; } - if (!rdev || rdev->faulty) + if (!rdev || test_bit(Faulty, &rdev->flags)) return; /* dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", @@ -3626,7 +3624,7 @@ static int md_seq_show(struct seq_file *seq, void *v) bdevname(rdev->bdev,b), rdev->desc_nr); if (test_bit(WriteMostly, &rdev->flags)) seq_printf(seq, "(W)"); - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { seq_printf(seq, "(F)"); continue; } else if (rdev->raid_disk < 0) @@ -4174,7 +4172,7 @@ void md_check_recovery(mddev_t *mddev) */ ITERATE_RDEV(mddev,rdev,rtmp) if (rdev->raid_disk >= 0 && - (rdev->faulty || ! rdev->in_sync) && + (test_bit(Faulty, &rdev->flags) || ! 
test_bit(In_sync, &rdev->flags)) && atomic_read(&rdev->nr_pending)==0) { if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) { char nm[20]; @@ -4187,7 +4185,7 @@ void md_check_recovery(mddev_t *mddev) if (mddev->degraded) { ITERATE_RDEV(mddev,rdev,rtmp) if (rdev->raid_disk < 0 - && !rdev->faulty) { + && !test_bit(Faulty, &rdev->flags)) { if (mddev->pers->hot_add_disk(mddev,rdev)) { char nm[20]; sprintf(nm, "rd%d", rdev->raid_disk); @@ -4347,7 +4345,7 @@ static void autostart_arrays(int part) if (IS_ERR(rdev)) continue; - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { MD_BUG(); continue; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index ae2c5fd6105d..145cdc5ad008 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -64,7 +64,7 @@ static int multipath_map (multipath_conf_t *conf) rcu_read_lock(); for (i = 0; i < disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && rdev->in_sync) { + if (rdev && test_bit(In_sync, &rdev->flags)) { atomic_inc(&rdev->nr_pending); rcu_read_unlock(); return i; @@ -140,7 +140,8 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) + && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -211,7 +212,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", conf->multipaths[i].rdev && - conf->multipaths[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? 
"U" : "_"); seq_printf (seq, "]"); } @@ -225,7 +226,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -265,10 +266,10 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) /* * Mark disk as unusable */ - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { char b[BDEVNAME_SIZE]; - rdev->in_sync = 0; - rdev->faulty = 1; + clear_bit(In_sync, &rdev->flags); + set_bit(Faulty, &rdev->flags); mddev->sb_dirty = 1; conf->working_disks--; printk(KERN_ALERT "multipath: IO failure on %s," @@ -298,7 +299,7 @@ static void print_multipath_conf (multipath_conf_t *conf) tmp = conf->multipaths + i; if (tmp->rdev) printk(" disk%d, o:%d, dev:%s\n", - i,!tmp->rdev->faulty, + i,!test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -330,7 +331,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) conf->working_disks++; rdev->raid_disk = path; - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); rcu_assign_pointer(p->rdev, rdev); found = 1; } @@ -350,7 +351,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number) rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { printk(KERN_ERR "hot-remove-disk, slot %d is identified" " but is still operational!\n", number); err = -EBUSY; @@ -482,7 +483,7 @@ static int multipath_run (mddev_t *mddev) mddev->queue->max_sectors > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); - if (!rdev->faulty) + if (!test_bit(Faulty, &rdev->flags)) conf->working_disks++; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f12fc288f25d..fb6b866c28f5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -417,11 +417,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) new_disk = 0; for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); - !rdev || !rdev->in_sync + !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { - if (rdev && rdev->in_sync) + if (rdev && test_bit(In_sync, &rdev->flags)) wonly_disk = new_disk; if (new_disk == conf->raid_disks - 1) { @@ -435,11 +435,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* make sure the disk is operational */ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); - !rdev || !rdev->in_sync || + !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { - if (rdev && rdev->in_sync) + if (rdev && test_bit(In_sync, &rdev->flags)) wonly_disk = new_disk; if (new_disk <= 0) @@ -477,7 +477,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) rdev = rcu_dereference(conf->mirrors[disk].rdev); if (!rdev || - !rdev->in_sync || + !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags)) continue; @@ -500,7 +500,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) if (!rdev) goto retry; atomic_inc(&rdev->nr_pending); - if (!rdev->in_sync) { + if (!test_bit(In_sync, &rdev->flags)) { /* cannot risk returning a device that failed * before we inc'ed nr_pending */ @@ -523,7 +523,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; 
iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -557,7 +557,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -729,9 +729,9 @@ static int make_request(request_queue_t *q, struct bio * bio) rcu_read_lock(); for (i = 0; i < disks; i++) { if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL && - !rdev->faulty) { + !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { atomic_dec(&rdev->nr_pending); r1_bio->bios[i] = NULL; } else @@ -824,7 +824,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf(seq, "%s", conf->mirrors[i].rdev && - conf->mirrors[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); seq_printf(seq, "]"); } @@ -840,14 +840,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * next level up know. * else mark the drive as failed */ - if (rdev->in_sync + if (test_bit(In_sync, &rdev->flags) && conf->working_disks == 1) /* * Don't fail the drive, act as though we were just a * normal single drive */ return; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { mddev->degraded++; conf->working_disks--; /* @@ -855,8 +855,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->in_sync = 0; - rdev->faulty = 1; + clear_bit(In_sync, &rdev->flags); + set_bit(Faulty, &rdev->flags); mddev->sb_dirty = 1; printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. 
\n" " Operation continuing on %d devices\n", @@ -881,7 +881,7 @@ static void print_conf(conf_t *conf) tmp = conf->mirrors + i; if (tmp->rdev) printk(" disk %d, wo:%d, o:%d, dev:%s\n", - i, !tmp->rdev->in_sync, !tmp->rdev->faulty, + i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -913,11 +913,11 @@ static int raid1_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->mirrors + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { conf->working_disks++; mddev->degraded--; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } @@ -972,7 +972,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) print_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -1282,11 +1282,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i /* make sure disk is operational */ wonly = disk; while (conf->mirrors[disk].rdev == NULL || - !conf->mirrors[disk].rdev->in_sync || + !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) || test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags) ) { if (conf->mirrors[disk].rdev && - conf->mirrors[disk].rdev->in_sync) + test_bit(In_sync, &conf->mirrors[disk].rdev->flags)) wonly = disk; if (disk <= 0) disk = conf->raid_disks; @@ -1333,10 +1333,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i bio->bi_rw = READ; bio->bi_end_io = end_sync_read; } else if (conf->mirrors[i].rdev == NULL || - conf->mirrors[i].rdev->faulty) { + test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { still_degraded = 1; continue; - } else if (!conf->mirrors[i].rdev->in_sync || + } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) || sector_nr + RESYNC_SECTORS > mddev->recovery_cp) { bio->bi_rw = WRITE; bio->bi_end_io = end_sync_write; @@ -1478,7 +1478,7 @@ static int run(mddev_t *mddev) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); disk->head_position = 0; - if (!rdev->faulty && rdev->in_sync) + if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags)) conf->working_disks++; } conf->raid_disks = mddev->raid_disks; @@ -1518,7 +1518,7 @@ static int run(mddev_t *mddev) */ for (j = 0; j < conf->raid_disks && (!conf->mirrors[j].rdev || - !conf->mirrors[j].rdev->in_sync) ; j++) + !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++) /* nothing */; conf->last_used = j; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 26114f40bde6..867f06ae33d9 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -512,7 +512,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) disk = r10_bio->devs[slot].devnum; while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || - !rdev->in_sync) { + !test_bit(In_sync, &rdev->flags)) { slot++; if (slot == conf->copies) { slot = 0; @@ -529,7 +529,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) slot = 0; disk = r10_bio->devs[slot].devnum; while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || - !rdev->in_sync) { + !test_bit(In_sync, &rdev->flags)) { slot ++; if (slot == conf->copies) { disk = -1; @@ -549,7 +549,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || - !rdev->in_sync) + !test_bit(In_sync, &rdev->flags)) 
continue; if (!atomic_read(&rdev->nr_pending)) { @@ -585,7 +585,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -616,7 +616,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -771,7 +771,7 @@ static int make_request(request_queue_t *q, struct bio * bio) int d = r10_bio->devs[i].devnum; mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && - !rdev->faulty) { + !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); r10_bio->devs[i].bio = bio; } else @@ -826,7 +826,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf(seq, "%s", conf->mirrors[i].rdev && - conf->mirrors[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); seq_printf(seq, "]"); } @@ -841,7 +841,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * next level up know. * else mark the drive as failed */ - if (rdev->in_sync + if (test_bit(In_sync, &rdev->flags) && conf->working_disks == 1) /* * Don't fail the drive, just return an IO error. @@ -851,7 +851,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * really dead" tests... */ return; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { mddev->degraded++; conf->working_disks--; /* @@ -859,8 +859,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->in_sync = 0; - rdev->faulty = 1; + clear_bit(In_sync, &rdev->flags); + set_bit(Faulty, &rdev->flags); mddev->sb_dirty = 1; printk(KERN_ALERT "raid10: Disk failure on %s, disabling device. 
\n" " Operation continuing on %d devices\n", @@ -885,7 +885,8 @@ static void print_conf(conf_t *conf) tmp = conf->mirrors + i; if (tmp->rdev) printk(" disk %d, wo:%d, o:%d, dev:%s\n", - i, !tmp->rdev->in_sync, !tmp->rdev->faulty, + i, !test_bit(In_sync, &tmp->rdev->flags), + !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -938,11 +939,11 @@ static int raid10_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->mirrors + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { conf->working_disks++; mddev->degraded--; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } @@ -1000,7 +1001,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number) print_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -1416,7 +1417,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (i=0 ; iraid_disks; i++) if (conf->mirrors[i].rdev && - !conf->mirrors[i].rdev->in_sync) { + !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { /* want to reconstruct this device */ r10bio_t *rb2 = r10_bio; @@ -1437,7 +1438,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (j=0; jcopies;j++) { int d = r10_bio->devs[j].devnum; if (conf->mirrors[d].rdev && - conf->mirrors[d].rdev->in_sync) { + test_bit(In_sync, &conf->mirrors[d].rdev->flags)) { /* This is where we read from */ bio = r10_bio->devs[0].bio; bio->bi_next = biolist; @@ -1513,7 +1514,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i bio = r10_bio->devs[i].bio; bio->bi_end_io = NULL; if (conf->mirrors[d].rdev == NULL || - conf->mirrors[d].rdev->faulty) + test_bit(Faulty, &conf->mirrors[d].rdev->flags)) continue; atomic_inc(&conf->mirrors[d].rdev->nr_pending); atomic_inc(&r10_bio->remaining); @@ -1699,7 +1700,7 @@ static int run(mddev_t *mddev) mddev->queue->max_sectors = (PAGE_SIZE>>9); disk->head_position = 0; - if (!rdev->faulty && rdev->in_sync) + if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags)) conf->working_disks++; } conf->raid_disks = mddev->raid_disks; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 77610b98d4e0..d1c488b008af 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -525,19 +525,19 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) raid5_conf_t *conf = (raid5_conf_t *) mddev->private; PRINTK("raid5: error called\n"); - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { mddev->sb_dirty = 1; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { conf->working_disks--; mddev->degraded++; conf->failed_disks++; - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); /* * if recovery was running, make sure it aborts. */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->faulty = 1; + set_bit(Faulty, &rdev->flags); printk (KERN_ALERT "raid5: Disk failure on %s, disabling device." 
" Operation continuing on %d devices\n", @@ -1003,12 +1003,12 @@ static void handle_stripe(struct stripe_head *sh) } if (dev->written) written++; rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ - if (!rdev || !rdev->in_sync) { + if (!rdev || !test_bit(In_sync, &rdev->flags)) { /* The ReadError flag wil just be confusing now */ clear_bit(R5_ReadError, &dev->flags); clear_bit(R5_ReWrite, &dev->flags); } - if (!rdev || !rdev->in_sync + if (!rdev || !test_bit(In_sync, &rdev->flags) || test_bit(R5_ReadError, &dev->flags)) { failed++; failed_num = i; @@ -1027,7 +1027,7 @@ static void handle_stripe(struct stripe_head *sh) if (test_bit(R5_ReadError, &sh->dev[i].flags)) { mdk_rdev_t *rdev = conf->disks[i].rdev; - if (rdev && rdev->in_sync) + if (rdev && test_bit(In_sync, &rdev->flags)) /* multiple read failures in one stripe */ md_error(conf->mddev, rdev); } @@ -1384,7 +1384,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_lock(); rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && rdev->faulty) + if (rdev && test_bit(Faulty, &rdev->flags)) rdev = NULL; if (rdev) atomic_inc(&rdev->nr_pending); @@ -1458,7 +1458,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -1503,7 +1503,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -1850,7 +1850,7 @@ static int run(mddev_t *mddev) disk->rdev = rdev; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { char b[BDEVNAME_SIZE]; printk(KERN_INFO "raid5: device %s operational as raid" " disk %d\n", bdevname(rdev->bdev,b), @@ -2029,7 +2029,7 @@ static void status (struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", conf->disks[i].rdev && - conf->disks[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); seq_printf (seq, "]"); #if RAID5_DEBUG #define D(x) \ @@ -2056,7 +2056,7 @@ static void print_raid5_conf (raid5_conf_t *conf) tmp = conf->disks + i; if (tmp->rdev) printk(" disk %d, o:%d, dev:%s\n", - i, !tmp->rdev->faulty, + i, !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -2070,12 +2070,12 @@ static int raid5_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->disks + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { mddev->degraded--; conf->failed_disks--; conf->working_disks++; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } print_raid5_conf(conf); @@ -2092,7 +2092,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number) print_raid5_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -2127,7 +2127,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) */ for (disk=0; disk < mddev->raid_disks; disk++) if ((p=conf->disks + disk)->rdev == NULL) { - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); rdev->raid_disk = disk; found = 1; if (rdev->saved_raid_disk != disk) diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 84f3ee01e4c8..eae5a35629c5 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -507,19 +507,19 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) raid6_conf_t *conf = (raid6_conf_t *) mddev->private; PRINTK("raid6: error called\n"); - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { mddev->sb_dirty = 1; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { conf->working_disks--; mddev->degraded++; conf->failed_disks++; - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); /* * if recovery was running, make sure it aborts. */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->faulty = 1; + set_bit(Faulty, &rdev->flags); printk (KERN_ALERT "raid6: Disk failure on %s, disabling device." 
" Operation continuing on %d devices\n", @@ -1071,7 +1071,7 @@ static void handle_stripe(struct stripe_head *sh) } if (dev->written) written++; rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ - if (!rdev || !rdev->in_sync) { + if (!rdev || !test_bit(In_sync, &rdev->flags)) { if ( failed < 2 ) failed_num[failed] = i; failed++; @@ -1465,7 +1465,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_lock(); rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && rdev->faulty) + if (rdev && test_bit(Faulty, &rdev->flags)) rdev = NULL; if (rdev) atomic_inc(&rdev->nr_pending); @@ -1539,7 +1539,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -1584,7 +1584,7 @@ static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -1868,7 +1868,7 @@ static int run(mddev_t *mddev) disk->rdev = rdev; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { char b[BDEVNAME_SIZE]; printk(KERN_INFO "raid6: device %s operational as raid" " disk %d\n", bdevname(rdev->bdev,b), @@ -2052,7 +2052,7 @@ static void status (struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", conf->disks[i].rdev && - conf->disks[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); seq_printf (seq, "]"); #if RAID6_DUMPSTATE seq_printf (seq, "\n"); @@ -2078,7 +2078,7 @@ static void print_raid6_conf (raid6_conf_t *conf) tmp = conf->disks + i; if (tmp->rdev) printk(" disk %d, o:%d, dev:%s\n", - i, !tmp->rdev->faulty, + i, !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -2092,12 +2092,12 @@ static int raid6_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->disks + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { mddev->degraded--; conf->failed_disks--; conf->working_disks++; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } print_raid6_conf(conf); @@ -2114,7 +2114,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number) print_raid6_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -2149,7 +2149,7 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) */ for (disk=0; disk < mddev->raid_disks; disk++) if ((p=conf->disks + disk)->rdev == NULL) { - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); rdev->raid_disk = disk; found = 1; if (rdev->saved_raid_disk != disk) diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 200c69e34fc0..11629f92180a 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -117,10 +117,10 @@ struct mdk_rdev_s * It can never have faulty==1, in_sync==1 * This reduces the burden of testing multiple flags in many cases */ - int faulty; /* if faulty do not issue IO requests */ - int in_sync; /* device is a full member of the array */ - unsigned long flags; /* Should include faulty and in_sync here. */ + unsigned long flags; +#define Faulty 1 /* device is known to have a fault */ +#define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ int desc_nr; /* descriptor index in the superblock */ @@ -247,7 +247,7 @@ struct mddev_s static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) { - int faulty = rdev->faulty; + int faulty = test_bit(Faulty, &rdev->flags); if (atomic_dec_and_test(&rdev->nr_pending) && faulty) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } -- cgit v1.2.3 From bd926c63b7a6843d3ce2728396c0891e54fce5c4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:32 -0800 Subject: [PATCH] md: make md on-disk bitmaps not host-endian Current bitmaps use set_bit et.al and so are host-endian, which means not-portable. Oops. Define a new version number (4) for which bitmaps are little-endian. 
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 22 ++++++++++++++++++---- drivers/md/md.c | 2 +- include/linux/raid/bitmap.h | 11 +++++++++-- include/linux/raid/md.h | 4 +++- 4 files changed, 31 insertions(+), 8 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index c5fa4c2a5af1..220273e81ed6 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -482,7 +482,8 @@ static int bitmap_read_sb(struct bitmap *bitmap) /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) reason = "bad magic"; - else if (sb->version != cpu_to_le32(BITMAP_MAJOR)) + else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || + le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) reason = "unrecognized superblock version"; else if (chunksize < 512 || chunksize > (1024 * 1024 * 4)) reason = "bitmap chunksize out of range (512B - 4MB)"; @@ -527,6 +528,8 @@ success: bitmap->daemon_lastrun = jiffies; bitmap->max_write_behind = write_behind; bitmap->flags |= sb->state; + if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) + bitmap->flags |= BITMAP_HOSTENDIAN; bitmap->events_cleared = le64_to_cpu(sb->events_cleared); if (sb->state & BITMAP_STALE) bitmap->events_cleared = bitmap->mddev->events; @@ -764,7 +767,10 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) /* set the bit */ kaddr = kmap_atomic(page, KM_USER0); - set_bit(bit, kaddr); + if (bitmap->flags & BITMAP_HOSTENDIAN) + set_bit(bit, kaddr); + else + ext2_set_bit(bit, kaddr); kunmap_atomic(kaddr, KM_USER0); PRINTK("set file bit %lu page %lu\n", bit, page->index); @@ -891,6 +897,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) oldindex = ~0L; for (i = 0; i < chunks; i++) { + int b; index = file_page_index(i); bit = file_page_offset(i); if (index != oldindex) { /* this is a new page, read it in */ @@ -939,7 +946,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) bitmap->filemap[bitmap->file_pages++] = page; } - if (test_bit(bit, page_address(page))) { + if (bitmap->flags & BITMAP_HOSTENDIAN) + b = test_bit(bit, page_address(page)); + else + b = ext2_test_bit(bit, page_address(page)); + if (b) { /* if the disk bit is set, set the memory bit */ bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap), ((i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= start) @@ -1097,7 +1108,10 @@ int bitmap_daemon_work(struct bitmap *bitmap) -1); /* clear the bit */ - clear_bit(file_page_offset(j), page_address(page)); + if (bitmap->flags & BITMAP_HOSTENDIAN) + clear_bit(file_page_offset(j), page_address(page)); + else + ext2_clear_bit(file_page_offset(j), page_address(page)); } } spin_unlock_irqrestore(&bitmap->lock, flags); diff --git a/drivers/md/md.c b/drivers/md/md.c index 9dfa063d1c6a..caa4add00c1b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4281,7 +4281,7 @@ static int __init md_init(void) " MD_SB_DISKS=%d\n", MD_MAJOR_VERSION, MD_MINOR_VERSION, MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS); - printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR, + printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI, BITMAP_MINOR); if (register_blkdev(MAJOR_NR, "md")) diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h index 9de99198caf1..899437802aea 100644 --- a/include/linux/raid/bitmap.h +++ b/include/linux/raid/bitmap.h @@ -6,7 +6,13 @@ #ifndef BITMAP_H #define BITMAP_H 1 -#define BITMAP_MAJOR 3 +#define 
BITMAP_MAJOR_LO 3 +/* version 4 insists the bitmap is in little-endian order + * with version 3, it is host-endian which is non-portable + */ +#define BITMAP_MAJOR_HI 4 +#define BITMAP_MAJOR_HOSTENDIAN 3 + #define BITMAP_MINOR 39 /* @@ -133,7 +139,8 @@ typedef __u16 bitmap_counter_t; /* use these for bitmap->flags and bitmap->sb->state bit-fields */ enum bitmap_state { BITMAP_ACTIVE = 0x001, /* the bitmap is in use */ - BITMAP_STALE = 0x002 /* the bitmap file is out of date or had -EIO */ + BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ + BITMAP_HOSTENDIAN = 0x8000, }; /* the superblock at the front of the bitmap file -- little endian */ diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index ffa316ce4dc8..91467a3c4a52 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h @@ -66,8 +66,10 @@ * and major_version/minor_version accordingly * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT * in the super status byte + * >=3 means that bitmap superblock version 4 is supported, which uses + * little-ending representation rather than host-endian */ -#define MD_PATCHLEVEL_VERSION 2 +#define MD_PATCHLEVEL_VERSION 3 extern int register_md_personality (int p_num, mdk_personality_t *p); extern int unregister_md_personality (int p_num); -- cgit v1.2.3 From a9701a30470856408d08657eb1bd7ae29a146190 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:34 -0800 Subject: [PATCH] md: support BIO_RW_BARRIER for md/raid1 We can only accept BARRIER requests if all slaves handle barriers, and that can, of course, change with time.... So we keep track of whether the whole array seems safe for barriers, and also whether each individual rdev handles barriers. We initially assumes barriers are OK. When writing the superblock we try a barrier, and if that fails, we flag things for no-barriers. This will usually clear the flags fairly quickly. If writing the superblock finds that BIO_RW_BARRIER is -ENOTSUPP, we need to resubmit, so introduce function "md_super_wait" which waits for requests to finish, and retries ENOTSUPP requests without the barrier flag. When writing the real raid1, write requests which were BIO_RW_BARRIER but which aresn't supported need to be retried. So raid1d is enhanced to do this, and when any bio write completes (i.e. no retry needed) we remove it from the r1bio, so that devices needing retry are easy to find. We should hardly ever get -ENOTSUPP errors when writing data to the raid. It should only happen if: 1/ the device used to support BARRIER, but now doesn't. Few devices change like this, though raid1 can! or 2/ the array has no persistent superblock, so there was no opportunity to pre-test for barriers when writing the superblock. 
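[Editor's note, not part of the patch: a hedged userspace sketch of the retry policy described above, under the assumption that it can be reduced to "try the write with a barrier; if the lower layer returns -EOPNOTSUPP, remember that and resubmit without the flag". submit_write() and write_with_barrier_fallback() are stand-ins invented for the example, not the actual submit_bio()/md_super_write() paths.]

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_BARRIER 0x1

static bool device_supports_barriers = false;  /* pretend the slave lost barrier support */

/* stand-in for the block layer: rejects barrier writes with -EOPNOTSUPP */
static int submit_write(unsigned flags)
{
        if ((flags & FLAG_BARRIER) && !device_supports_barriers)
                return -EOPNOTSUPP;
        return 0;
}

static int write_with_barrier_fallback(bool *barriers_work)
{
        unsigned flags = *barriers_work ? FLAG_BARRIER : 0;
        int err = submit_write(flags);

        if (err == -EOPNOTSUPP && (flags & FLAG_BARRIER)) {
                /* barriers don't appear to be supported: note it, retry plain */
                *barriers_work = false;
                err = submit_write(flags & ~FLAG_BARRIER);
        }
        return err;
}

int main(void)
{
        bool barriers_work = true;  /* md initially assumes barriers are OK */

        printf("first write:  %d (barriers_work now %d)\n",
               write_with_barrier_fallback(&barriers_work), barriers_work);
        printf("second write: %d (no barrier attempted)\n",
               write_with_barrier_fallback(&barriers_work));
        return 0;
}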
Signed-off-by: Neil Brown Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 5 +- drivers/md/md.c | 99 +++++++++++++++++++++++++++------ drivers/md/raid1.c | 134 +++++++++++++++++++++++++++++++-------------- include/linux/raid/md.h | 1 + include/linux/raid/md_k.h | 8 +++ include/linux/raid/raid1.h | 4 +- 6 files changed, 189 insertions(+), 62 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 220273e81ed6..51315302a85e 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -301,7 +301,7 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai page); if (wait) - wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); + md_super_wait(mddev); return 0; } @@ -828,8 +828,7 @@ int bitmap_unplug(struct bitmap *bitmap) wake_up_process(bitmap->writeback_daemon->tsk)); spin_unlock_irq(&bitmap->write_lock); } else - wait_event(bitmap->mddev->sb_wait, - atomic_read(&bitmap->mddev->pending_writes)==0); + md_super_wait(bitmap->mddev); } return 0; } diff --git a/drivers/md/md.c b/drivers/md/md.c index caa4add00c1b..199016932de5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -330,18 +330,46 @@ static void free_disk_sb(mdk_rdev_t * rdev) static int super_written(struct bio *bio, unsigned int bytes_done, int error) { mdk_rdev_t *rdev = bio->bi_private; + mddev_t *mddev = rdev->mddev; if (bio->bi_size) return 1; if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) - md_error(rdev->mddev, rdev); + md_error(mddev, rdev); - if (atomic_dec_and_test(&rdev->mddev->pending_writes)) - wake_up(&rdev->mddev->sb_wait); + if (atomic_dec_and_test(&mddev->pending_writes)) + wake_up(&mddev->sb_wait); bio_put(bio); return 0; } +static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) +{ + struct bio *bio2 = bio->bi_private; + mdk_rdev_t *rdev = bio2->bi_private; + mddev_t *mddev = rdev->mddev; + if (bio->bi_size) + return 1; + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && + error == -EOPNOTSUPP) { + unsigned long flags; + /* barriers don't appear to be supported :-( */ + set_bit(BarriersNotsupp, &rdev->flags); + mddev->barriers_work = 0; + spin_lock_irqsave(&mddev->write_lock, flags); + bio2->bi_next = mddev->biolist; + mddev->biolist = bio2; + spin_unlock_irqrestore(&mddev->write_lock, flags); + wake_up(&mddev->sb_wait); + bio_put(bio); + return 0; + } + bio_put(bio2); + bio->bi_private = rdev; + return super_written(bio, bytes_done, error); +} + void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page) { @@ -350,16 +378,54 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, * and decrement it on completion, waking up sb_wait * if zero is reached. * If an error occurred, call md_error + * + * As we might need to resubmit the request if BIO_RW_BARRIER + * causes ENOTSUPP, we allocate a spare bio... */ struct bio *bio = bio_alloc(GFP_NOIO, 1); + int rw = (1<bi_bdev = rdev->bdev; bio->bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; + bio->bi_rw = rw; + atomic_inc(&mddev->pending_writes); - submit_bio((1<flags)) { + struct bio *rbio; + rw |= (1<bi_private = bio; + rbio->bi_end_io = super_written_barrier; + submit_bio(rw, rbio); + } else + submit_bio(rw, bio); +} + +void md_super_wait(mddev_t *mddev) +{ + /* wait for all superblock writes that were scheduled to complete. 
+ * if any had to be retried (due to BARRIER problems), retry them + */ + DEFINE_WAIT(wq); + for(;;) { + prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); + if (atomic_read(&mddev->pending_writes)==0) + break; + while (mddev->biolist) { + struct bio *bio; + spin_lock_irq(&mddev->write_lock); + bio = mddev->biolist; + mddev->biolist = bio->bi_next ; + bio->bi_next = NULL; + spin_unlock_irq(&mddev->write_lock); + submit_bio(bio->bi_rw, bio); + } + schedule(); + } + finish_wait(&mddev->sb_wait, &wq); } static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) @@ -1382,7 +1448,7 @@ static void md_update_sb(mddev_t * mddev) int sync_req; repeat: - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); sync_req = mddev->in_sync; mddev->utime = get_seconds(); mddev->events ++; @@ -1405,11 +1471,11 @@ repeat: */ if (!mddev->persistent) { mddev->sb_dirty = 0; - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); wake_up(&mddev->sb_wait); return; } - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); dprintk(KERN_INFO "md: updating %s RAID superblock on device (in sync %d)\n", @@ -1437,17 +1503,17 @@ repeat: /* only need to write one superblock... */ break; } - wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); + md_super_wait(mddev); /* if there was a failure, sb_dirty was set to 1, and we re-write super */ - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); if (mddev->in_sync != sync_req|| mddev->sb_dirty == 1) { /* have to write it out again */ - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); goto repeat; } mddev->sb_dirty = 0; - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); wake_up(&mddev->sb_wait); } @@ -1989,6 +2055,7 @@ static int do_md_run(mddev_t * mddev) mddev->recovery = 0; mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ + mddev->barriers_work = 1; /* before we start the array running, initialise the bitmap */ err = bitmap_create(mddev); @@ -2107,7 +2174,7 @@ static int do_md_stop(mddev_t * mddev, int ro) mddev->ro = 1; } else { bitmap_flush(mddev); - wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); + md_super_wait(mddev); if (mddev->ro) set_disk_ro(disk, 0); blk_queue_make_request(mddev->queue, md_fail_request); @@ -3796,13 +3863,13 @@ void md_write_start(mddev_t *mddev, struct bio *bi) atomic_inc(&mddev->writes_pending); if (mddev->in_sync) { - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); if (mddev->in_sync) { mddev->in_sync = 0; mddev->sb_dirty = 1; md_wakeup_thread(mddev->thread); } - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); } wait_event(mddev->sb_wait, mddev->sb_dirty==0); } @@ -4112,7 +4179,7 @@ void md_check_recovery(mddev_t *mddev) if (mddev_trylock(mddev)==0) { int spares =0; - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); if (mddev->safemode && !atomic_read(&mddev->writes_pending) && !mddev->in_sync && mddev->recovery_cp == MaxSector) { mddev->in_sync = 1; @@ -4120,7 +4187,7 @@ void md_check_recovery(mddev_t *mddev) } if (mddev->safemode == 1) mddev->safemode = 0; - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); if (mddev->sb_dirty) md_update_sb(mddev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fb6b866c28f5..1cbf51fbd43f 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -301,7 +301,7 @@ static int raid1_end_write_request(struct bio 
*bio, unsigned int bytes_done, int { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); - int mirror, behind; + int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); conf_t *conf = mddev_to_conf(r1_bio->mddev); if (bio->bi_size) @@ -311,47 +311,54 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int if (r1_bio->bios[mirror] == bio) break; - /* - * this branch is our 'one mirror IO has finished' event handler: - */ - if (!uptodate) { - md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); - /* an I/O failed, we can't clear the bitmap */ - set_bit(R1BIO_Degraded, &r1_bio->state); - } else + if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { + set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); + set_bit(R1BIO_BarrierRetry, &r1_bio->state); + r1_bio->mddev->barriers_work = 0; + } else { /* - * Set R1BIO_Uptodate in our master bio, so that - * we will return a good error code for to the higher - * levels even if IO on some other mirrored buffer fails. - * - * The 'master' represents the composite IO operation to - * user-side. So if something waits for IO, then it will - * wait for the 'master' bio. + * this branch is our 'one mirror IO has finished' event handler: */ - set_bit(R1BIO_Uptodate, &r1_bio->state); - - update_head_pos(mirror, r1_bio); - - behind = test_bit(R1BIO_BehindIO, &r1_bio->state); - if (behind) { - if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) - atomic_dec(&r1_bio->behind_remaining); - - /* In behind mode, we ACK the master bio once the I/O has safely - * reached all non-writemostly disks. Setting the Returned bit - * ensures that this gets done only once -- we don't ever want to - * return -EIO here, instead we'll wait */ - - if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && - test_bit(R1BIO_Uptodate, &r1_bio->state)) { - /* Maybe we can return now */ - if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { - struct bio *mbio = r1_bio->master_bio; - PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", - (unsigned long long) mbio->bi_sector, - (unsigned long long) mbio->bi_sector + - (mbio->bi_size >> 9) - 1); - bio_endio(mbio, mbio->bi_size, 0); + r1_bio->bios[mirror] = NULL; + bio_put(bio); + if (!uptodate) { + md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); + /* an I/O failed, we can't clear the bitmap */ + set_bit(R1BIO_Degraded, &r1_bio->state); + } else + /* + * Set R1BIO_Uptodate in our master bio, so that + * we will return a good error code for to the higher + * levels even if IO on some other mirrored buffer fails. + * + * The 'master' represents the composite IO operation to + * user-side. So if something waits for IO, then it will + * wait for the 'master' bio. + */ + set_bit(R1BIO_Uptodate, &r1_bio->state); + + update_head_pos(mirror, r1_bio); + + if (behind) { + if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) + atomic_dec(&r1_bio->behind_remaining); + + /* In behind mode, we ACK the master bio once the I/O has safely + * reached all non-writemostly disks. 
Setting the Returned bit + * ensures that this gets done only once -- we don't ever want to + * return -EIO here, instead we'll wait */ + + if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && + test_bit(R1BIO_Uptodate, &r1_bio->state)) { + /* Maybe we can return now */ + if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { + struct bio *mbio = r1_bio->master_bio; + PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", + (unsigned long long) mbio->bi_sector, + (unsigned long long) mbio->bi_sector + + (mbio->bi_size >> 9) - 1); + bio_endio(mbio, mbio->bi_size, 0); + } } } } @@ -361,8 +368,16 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int * already. */ if (atomic_dec_and_test(&r1_bio->remaining)) { + if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { + reschedule_retry(r1_bio); + /* Don't dec_pending yet, we want to hold + * the reference over the retry + */ + return 0; + } if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { /* free extra copy of the data pages */ +/* FIXME bio has been freed!!! */ int i = bio->bi_vcnt; while (i--) __free_page(bio->bi_io_vec[i].bv_page); @@ -648,8 +663,9 @@ static int make_request(request_queue_t *q, struct bio * bio) struct bio_list bl; struct page **behind_pages = NULL; const int rw = bio_data_dir(bio); + int do_barriers; - if (unlikely(bio_barrier(bio))) { + if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { bio_endio(bio, bio->bi_size, -EOPNOTSUPP); return 0; } @@ -759,6 +775,10 @@ static int make_request(request_queue_t *q, struct bio * bio) atomic_set(&r1_bio->remaining, 0); atomic_set(&r1_bio->behind_remaining, 0); + do_barriers = bio->bi_rw & BIO_RW_BARRIER; + if (do_barriers) + set_bit(R1BIO_Barrier, &r1_bio->state); + bio_list_init(&bl); for (i = 0; i < disks; i++) { struct bio *mbio; @@ -771,7 +791,7 @@ static int make_request(request_queue_t *q, struct bio * bio) mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; - mbio->bi_rw = WRITE; + mbio->bi_rw = WRITE | do_barriers; mbio->bi_private = r1_bio; if (behind_pages) { @@ -1153,6 +1173,36 @@ static void raid1d(mddev_t *mddev) if (test_bit(R1BIO_IsSync, &r1_bio->state)) { sync_request_write(mddev, r1_bio); unplug = 1; + } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { + /* some requests in the r1bio were BIO_RW_BARRIER + * requests which failed with -ENOTSUPP. Hohumm.. + * Better resubmit without the barrier. + * We know which devices to resubmit for, because + * all others have had their bios[] entry cleared. 
+ */ + int i; + clear_bit(R1BIO_BarrierRetry, &r1_bio->state); + clear_bit(R1BIO_Barrier, &r1_bio->state); + for (i=0; i < conf->raid_disks; i++) + if (r1_bio->bios[i]) { + struct bio_vec *bvec; + int j; + + bio = bio_clone(r1_bio->master_bio, GFP_NOIO); + /* copy pages from the failed bio, as + * this might be a write-behind device */ + __bio_for_each_segment(bvec, bio, j, 0) + bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page; + bio_put(r1_bio->bios[i]); + bio->bi_sector = r1_bio->sector + + conf->mirrors[i].rdev->data_offset; + bio->bi_bdev = conf->mirrors[i].rdev->bdev; + bio->bi_end_io = raid1_end_write_request; + bio->bi_rw = WRITE; + bio->bi_private = r1_bio; + r1_bio->bios[i] = bio; + generic_make_request(bio); + } } else { int disk; bio = r1_bio->bios[r1_bio->read_disk]; diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index 91467a3c4a52..13e7c4b62367 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h @@ -89,6 +89,7 @@ extern void md_print_devices (void); extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page); +extern void md_super_wait(mddev_t *mddev); extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, struct page *page, int rw); diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 11629f92180a..d5854c2b2721 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -122,6 +122,7 @@ struct mdk_rdev_s #define Faulty 1 /* device is known to have a fault */ #define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ +#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ int desc_nr; /* descriptor index in the superblock */ int raid_disk; /* role of device in array */ @@ -210,6 +211,13 @@ struct mddev_s int degraded; /* whether md should consider * adding a spare */ + int barriers_work; /* initialised to true, cleared as soon + * as a barrier request to slave + * fails. Only supported + */ + struct bio *biolist; /* bios that need to be retried + * because BIO_RW_BARRIER is not supported + */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h index 60e19b667548..292b98f2b408 100644 --- a/include/linux/raid/raid1.h +++ b/include/linux/raid/raid1.h @@ -110,7 +110,9 @@ struct r1bio_s { #define R1BIO_Uptodate 0 #define R1BIO_IsSync 1 #define R1BIO_Degraded 2 -#define R1BIO_BehindIO 3 +#define R1BIO_BehindIO 3 +#define R1BIO_Barrier 4 +#define R1BIO_BarrierRetry 5 /* For write-behind requests, we call bi_end_io when * the last non-write-behind device completes, providing * any write was successful. Otherwise we call when -- cgit v1.2.3 From 787453c2397edcc3261efebb661739acd8c38547 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:43 -0800 Subject: [PATCH] md: complete conversion of md to use kthreads There are a few loose ends following the conversion of md to use kthreads: - Some fields in mdk_thread_t that aren't needed (kthreads does it's own completion and manages it's own name). - thread->run is now never NULL, so no need to check - Some tests for signal_pending that aren't needed (As we don't use signals to stop threads any more) - Some flush_signals are not needed - Some waits are interruptible and don't need to be. 
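[Editor's note, not part of the patch: a hedged userspace analogue, using pthreads rather than the kernel kthread API, of the simplified md_thread loop this cleanup leaves behind: wait (with a timeout) for a wakeup flag or a stop request, clear the flag, call run() directly. All names here are invented for the illustration; the kernel code uses wait_event_timeout(), kthread_should_stop() and the THREAD_WAKEUP bit instead.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct md_like_thread {
        pthread_mutex_t lock;
        pthread_cond_t  wq;
        bool wakeup;
        bool should_stop;
        void (*run)(void *);
        void *arg;
};

static void *thread_body(void *p)
{
        struct md_like_thread *t = p;

        pthread_mutex_lock(&t->lock);
        while (!t->should_stop) {
                if (!t->wakeup) {
                        struct timespec ts;
                        clock_gettime(CLOCK_REALTIME, &ts);
                        ts.tv_sec += 1;        /* stand-in for thread->timeout */
                        pthread_cond_timedwait(&t->wq, &t->lock, &ts);
                }
                t->wakeup = false;             /* clear_bit(THREAD_WAKEUP, ...) */
                pthread_mutex_unlock(&t->lock);
                t->run(t->arg);                /* run is never NULL, call it directly */
                pthread_mutex_lock(&t->lock);
        }
        pthread_mutex_unlock(&t->lock);
        return NULL;
}

static void do_work(void *arg) { printf("worker ran (%s)\n", (const char *)arg); }

int main(void)
{
        struct md_like_thread t = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                false, false, do_work, "demo",
        };
        pthread_t tid;

        pthread_create(&tid, NULL, thread_body, &t);

        pthread_mutex_lock(&t.lock);           /* md_wakeup_thread() analogue */
        t.wakeup = true;
        pthread_cond_signal(&t.wq);
        pthread_mutex_unlock(&t.lock);

        sleep(1);

        pthread_mutex_lock(&t.lock);           /* md_unregister_thread() analogue */
        t.should_stop = true;
        pthread_cond_signal(&t.wq);
        pthread_mutex_unlock(&t.lock);
        pthread_join(tid, NULL);
        return 0;
}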
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 33 ++++++++++----------------------- include/linux/raid/md_k.h | 2 -- 2 files changed, 10 insertions(+), 25 deletions(-) (limited to 'include/linux/raid') diff --git a/drivers/md/md.c b/drivers/md/md.c index 25f2bbfe6a2b..097ae1b5484b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3424,21 +3424,17 @@ static int md_thread(void * arg) */ allow_signal(SIGKILL); - complete(thread->event); while (!kthread_should_stop()) { - void (*run)(mddev_t *); - wait_event_interruptible_timeout(thread->wqueue, - test_bit(THREAD_WAKEUP, &thread->flags) - || kthread_should_stop(), - thread->timeout); + wait_event_timeout(thread->wqueue, + test_bit(THREAD_WAKEUP, &thread->flags) + || kthread_should_stop(), + thread->timeout); try_to_freeze(); clear_bit(THREAD_WAKEUP, &thread->flags); - run = thread->run; - if (run) - run(thread->mddev); + thread->run(thread->mddev); } return 0; @@ -3457,7 +3453,6 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, const char *name) { mdk_thread_t *thread; - struct completion event; thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL); if (!thread) @@ -3466,18 +3461,14 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, memset(thread, 0, sizeof(mdk_thread_t)); init_waitqueue_head(&thread->wqueue); - init_completion(&event); - thread->event = &event; thread->run = run; thread->mddev = mddev; - thread->name = name; thread->timeout = MAX_SCHEDULE_TIMEOUT; thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); if (IS_ERR(thread->tsk)) { kfree(thread); return NULL; } - wait_for_completion(&event); return thread; } @@ -3941,9 +3932,7 @@ static void md_do_sync(mddev_t *mddev) mddev->curr_resync = 2; try_again: - if (signal_pending(current) || - kthread_should_stop()) { - flush_signals(current); + if (kthread_should_stop()) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); goto skip; } @@ -3963,9 +3952,8 @@ static void md_do_sync(mddev_t *mddev) * time 'round when curr_resync == 2 */ continue; - prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); - if (!signal_pending(current) && - !kthread_should_stop() && + prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE); + if (!kthread_should_stop() && mddev2->curr_resync >= mddev->curr_resync) { printk(KERN_INFO "md: delaying resync of %s" " until %s has finished resync (they" @@ -4074,13 +4062,12 @@ static void md_do_sync(mddev_t *mddev) } - if (signal_pending(current) || kthread_should_stop()) { + if (kthread_should_stop()) { /* * got a signal, exit. */ printk(KERN_INFO "md: md_do_sync() got signal ... exiting\n"); - flush_signals(current); set_bit(MD_RECOVERY_INTR, &mddev->recovery); goto out; } @@ -4102,7 +4089,7 @@ static void md_do_sync(mddev_t *mddev) if (currspeed > sysctl_speed_limit_min) { if ((currspeed > sysctl_speed_limit_max) || !is_mddev_idle(mddev)) { - msleep_interruptible(250); + msleep(250); goto repeat; } } diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index d5854c2b2721..46629a275ba9 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -334,10 +334,8 @@ typedef struct mdk_thread_s { mddev_t *mddev; wait_queue_head_t wqueue; unsigned long flags; - struct completion *event; struct task_struct *tsk; unsigned long timeout; - const char *name; } mdk_thread_t; #define THREAD_WAKEUP 0 -- cgit v1.2.3
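[Editor's note, not part of any patch above: a minimal plain-C sketch of the flag-bit pattern that the rdev->flags conversion earlier in this series (Faulty/In_sync/WriteMostly replacing separate int fields) relies on. The helpers here are simplified, non-atomic stand-ins written for the example; the kernel's test_bit/set_bit/clear_bit are atomic bitops, and only the bit numbers match md_k.h.]

#include <stdio.h>

enum { Faulty = 1, In_sync = 2, WriteMostly = 4 };  /* bit numbers as in md_k.h */

static int  test_bit(int nr, const unsigned long *flags) { return (*flags >> nr) & 1UL; }
static void set_bit(int nr, unsigned long *flags)        { *flags |= 1UL << nr; }
static void clear_bit(int nr, unsigned long *flags)      { *flags &= ~(1UL << nr); }

int main(void)
{
        unsigned long flags = 0;

        set_bit(In_sync, &flags);     /* device joins the array fully synced */
        printf("in_sync=%d faulty=%d\n",
               test_bit(In_sync, &flags), test_bit(Faulty, &flags));

        clear_bit(In_sync, &flags);   /* error(): mark the device failed */
        set_bit(Faulty, &flags);
        printf("in_sync=%d faulty=%d\n",
               test_bit(In_sync, &flags), test_bit(Faulty, &flags));
        return 0;
}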