summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
authorNeil Brown <neilb@cse.unsw.edu.au>2002-06-18 04:17:48 -0700
committerLinus Torvalds <torvalds@home.transmeta.com>2002-06-18 04:17:48 -0700
commitf7bbc7e12d9d3a5799f2dd551323f23be682dfd9 (patch)
tree24b342686f77a02d88f13f4c49a2b5a0ee897492 /drivers
parent2d35e42d1cdb08f940a9c302c04bd6b24c44cedc (diff)
[PATCH] md 20 of 22 - Provide SMP safe locking for all_mddevs list.
Provide SMP safe locking for the all_mddevs list. The all_mddevs_lock is added to protect all_mddevs and mddev_map. ITERATE_MDDEV is moved to md.c (it isn't needed elsewhere) and enhanced to take the lock appropriately and to always hold a refcount on the object that is given to the body of the loop. mddev_find is changed so that the structure is allocated outside the lock, but the test-and-set is done inside the lock.
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/md.c73
1 files changed, 55 insertions, 18 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1fccd7c3a4aa..825c78d16c44 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -127,8 +127,33 @@ static struct gendisk md_gendisk=
/*
* Enables to iterate over all existing md arrays
+ * all_mddevs_lock protects this list as well as mddev_map.
*/
static LIST_HEAD(all_mddevs);
+static spinlock_t all_mddevs_lock = SPIN_LOCK_UNLOCKED;
+
+
+/*
+ * iterates through all used mddevs in the system.
+ * We take care to grab the all_mddevs_lock whenever navigating
+ * the list, and to always hold a refcount when unlocked.
+ * Any code which breaks out of this loop while still holding
+ * a reference to the current mddev must mddev_put it.
+ */
+#define ITERATE_MDDEV(mddev,tmp) \
+ \
+ for (spin_lock(&all_mddevs_lock), \
+ (tmp = all_mddevs.next), \
+ (mddev = NULL); \
+ (void)(tmp != &all_mddevs && \
+ mddev_get(list_entry(tmp, mddev_t, all_mddevs))),\
+ spin_unlock(&all_mddevs_lock), \
+ (mddev ? mddev_put(mddev):(void)NULL), \
+ (mddev = list_entry(tmp, mddev_t, all_mddevs)), \
+ (tmp != &all_mddevs); \
+ spin_lock(&all_mddevs_lock), \
+ (tmp = tmp->next) \
+ )
static mddev_t *mddev_map[MAX_MD_DEVS];
@@ -146,7 +171,7 @@ static inline mddev_t *mddev_get(mddev_t *mddev)
static void mddev_put(mddev_t *mddev)
{
- if (!atomic_dec_and_test(&mddev->active))
+ if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->sb && list_empty(&mddev->disks)) {
list_del(&mddev->all_mddevs);
@@ -154,33 +179,44 @@ static void mddev_put(mddev_t *mddev)
kfree(mddev);
MOD_DEC_USE_COUNT;
}
+ spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(int unit)
{
- mddev_t *mddev;
+ mddev_t *mddev, *new = NULL;
- if ((mddev = mddev_map[unit])) {
- return mddev_get(mddev);
+ retry:
+ spin_lock(&all_mddevs_lock);
+ if (mddev_map[unit]) {
+ mddev = mddev_get(mddev_map[unit]);
+ spin_unlock(&all_mddevs_lock);
+ if (new)
+ kfree(new);
+ return mddev;
}
- mddev = (mddev_t *) kmalloc(sizeof(*mddev), GFP_KERNEL);
- if (!mddev)
- return NULL;
-
- memset(mddev, 0, sizeof(*mddev));
+ if (new) {
+ mddev_map[unit] = new;
+ list_add(&new->all_mddevs, &all_mddevs);
+ spin_unlock(&all_mddevs_lock);
+ MOD_INC_USE_COUNT;
+ return new;
+ }
+ spin_unlock(&all_mddevs_lock);
- mddev->__minor = unit;
- init_MUTEX(&mddev->reconfig_sem);
- INIT_LIST_HEAD(&mddev->disks);
- INIT_LIST_HEAD(&mddev->all_mddevs);
- atomic_set(&mddev->active, 1);
+ new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
- mddev_map[unit] = mddev;
- list_add(&mddev->all_mddevs, &all_mddevs);
+ memset(new, 0, sizeof(*new));
- MOD_INC_USE_COUNT;
+ new->__minor = unit;
+ init_MUTEX(&new->reconfig_sem);
+ INIT_LIST_HEAD(&new->disks);
+ INIT_LIST_HEAD(&new->all_mddevs);
+ atomic_set(&new->active, 1);
- return mddev;
+ goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
@@ -3192,6 +3228,7 @@ int md_do_sync(mddev_t *mddev, mdp_disk_t *spare)
mddev2->curr_resync < 2)) {
flush_curr_signals();
err = -EINTR;
+ mddev_put(mddev2);
goto out;
}
}