author          SandeepKsinha <sandeepksinha@gmail.com>    2009-06-18 08:49:35 +1000
committer       NeilBrown <neilb@suse.de>                  2009-06-18 08:49:35 +1000
commit          af11c397fd8835c70ec0bb777104e4ab98b2d660 (patch)
tree            59371fb1f955083f890dd6434e4c2f0ab9b7a97a /drivers/md/linear.c
parent          0894cc3066aaa3e75a99383c0d25feebf9b688ac (diff)
md linear: Protecting mddev with rcu locks to avoid races
Due to the lack of memory ordering guarantees, we may have races around
the linear_conf_t structure reached through mddev->private.
In particular, the contents of the structure we get from dereferencing
->private might not yet be visible to this CPU, and they might not be
consistent w.r.t. mddev->raid_disks.
This patch addresses the problem by using RCU protection to avoid such
race conditions.
Signed-off-by: SandeepKsinha <sandeepksinha@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
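For readers unfamiliar with the idiom, the patch is an instance of RCU's publish/subscribe pattern: the writer fully initialises a new conf and publishes it with rcu_assign_pointer(), which orders the initialising stores before the pointer store, while readers bracket every use of the pointer with rcu_read_lock()/rcu_read_unlock() and load it through rcu_dereference(). Below is a minimal userspace sketch of that pattern using liburcu, whose API mirrors the kernel primitives; the struct, field, and function names are illustrative stand-ins, not taken from linear.c.

#include <urcu.h>   /* userspace RCU; build with: cc sketch.c -lurcu */
#include <stdio.h>
#include <stdlib.h>

struct conf {
        int disks;                        /* stand-in for linear_conf_t state */
};

static struct conf *private_conf;         /* RCU-protected, like mddev->private */

/* Reader side: the pattern used in which_dev()/linear_congested(). */
static int read_disks(void)
{
        int n;

        rcu_read_lock();
        n = rcu_dereference(private_conf)->disks;
        rcu_read_unlock();
        return n;
}

/* Writer side: the pattern used in linear_add(); assume a reconfig
 * mutex serialises writers, as reconfig_mutex does in md. */
static void grow(void)
{
        struct conf *old = private_conf;
        struct conf *new = malloc(sizeof(*new));

        new->disks = old ? old->disks + 1 : 1;   /* init before publish */
        rcu_assign_pointer(private_conf, new);   /* publish with barrier */
        synchronize_rcu();                       /* wait out readers of 'old' */
        free(old);
}

int main(void)
{
        rcu_register_thread();            /* liburcu: each thread registers */
        grow();
        printf("disks = %d\n", read_disks());
        rcu_unregister_thread();
        return 0;
}

Note that the patch itself does not need synchronize_rcu() on the grow path: linear_add() chains the old conf onto newconf->prev, and linear_stop() frees the whole chain later under reconfig_mutex, after blk_sync_queue() has drained the unplug path.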
Diffstat (limited to 'drivers/md/linear.c')
-rw-r--r--      drivers/md/linear.c    47
1 file changed, 38 insertions(+), 9 deletions(-)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 564c390f8a1..93f2b1d1839 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -28,10 +28,11 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 {
         int lo, mid, hi;
-        linear_conf_t *conf = mddev->private;
+        linear_conf_t *conf;
 
         lo = 0;
         hi = mddev->raid_disks - 1;
+        conf = rcu_dereference(mddev->private);
 
         /*
          * Binary Search
          */
@@ -66,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q,
         unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
         sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 
+        rcu_read_lock();
         dev0 = which_dev(mddev, sector);
         maxsectors = dev0->end_sector - sector;
+        rcu_read_unlock();
 
         if (maxsectors < bio_sectors)
                 maxsectors = 0;
@@ -86,36 +89,50 @@ static int linear_mergeable_bvec(struct request_queue *q,
 static void linear_unplug(struct request_queue *q)
 {
         mddev_t *mddev = q->queuedata;
-        linear_conf_t *conf = mddev->private;
+        linear_conf_t *conf;
         int i;
 
+        rcu_read_lock();
+        conf = rcu_dereference(mddev->private);
+
         for (i=0; i < mddev->raid_disks; i++) {
                 struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
                 blk_unplug(r_queue);
         }
+        rcu_read_unlock();
 }
 
 static int linear_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
-        linear_conf_t *conf = mddev->private;
+        linear_conf_t *conf;
         int i, ret = 0;
 
+        rcu_read_lock();
+        conf = rcu_dereference(mddev->private);
+
         for (i = 0; i < mddev->raid_disks && !ret ; i++) {
                 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
                 ret |= bdi_congested(&q->backing_dev_info, bits);
         }
+
+        rcu_read_unlock();
         return ret;
 }
 
 static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 {
-        linear_conf_t *conf = mddev->private;
+        linear_conf_t *conf;
+        sector_t array_sectors;
 
+        rcu_read_lock();
+        conf = rcu_dereference(mddev->private);
         WARN_ONCE(sectors || raid_disks,
                   "%s does not support generic reshape\n", __func__);
+        array_sectors = conf->array_sectors;
+        rcu_read_unlock();
 
-        return conf->array_sectors;
+        return array_sectors;
 }
 
 static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
@@ -229,8 +246,8 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
                 return -ENOMEM;
 
         newconf->prev = mddev->private;
-        mddev->private = newconf;
         mddev->raid_disks++;
+        rcu_assign_pointer(mddev->private, newconf);
         md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
         set_capacity(mddev->gendisk, mddev->array_sectors);
         return 0;
@@ -239,7 +256,13 @@ static int linear_stop (mddev_t *mddev)
 {
         linear_conf_t *conf = mddev->private;
-
+
+        /*
+         * We do not require rcu protection here since
+         * we hold reconfig_mutex for both linear_add and
+         * linear_stop, so they cannot race.
+         */
+
         blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 
         do {
                 linear_conf_t *t = conf->prev;
@@ -269,9 +292,11 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
                       bio_sectors(bio));
         part_stat_unlock();
 
+        rcu_read_lock();
         tmp_dev = which_dev(mddev, bio->bi_sector);
         start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
 
+
         if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
                      || (bio->bi_sector < start_sector))) {
                 char b[BDEVNAME_SIZE];
@@ -282,6 +307,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
                        bdevname(tmp_dev->rdev->bdev, b),
                        (unsigned long long)tmp_dev->rdev->sectors,
                        (unsigned long long)start_sector);
+                rcu_read_unlock();
                 bio_io_error(bio);
                 return 0;
         }
@@ -291,9 +317,11 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
                  * split it.
                  */
                 struct bio_pair *bp;
+                sector_t end_sector = tmp_dev->end_sector;
+
+                rcu_read_unlock();
 
-                bp = bio_split(bio,
-                               tmp_dev->end_sector - bio->bi_sector);
+                bp = bio_split(bio, end_sector - bio->bi_sector);
 
                 if (linear_make_request(q, &bp->bio1))
                         generic_make_request(&bp->bio1);
@@ -306,6 +334,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
         bio->bi_bdev = tmp_dev->rdev->bdev;
         bio->bi_sector = bio->bi_sector - start_sector
                 + tmp_dev->rdev->data_offset;
+        rcu_read_unlock();
 
         return 1;
 }
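A detail worth calling out in the linear_make_request() hunks above: any value that is still needed after rcu_read_unlock() (here, tmp_dev->end_sector for bio_split()) is copied into a local variable before the lock is dropped, because the conf and the dev_info_t entries inside it may be freed once the read-side critical section ends. A hedged sketch of that copy-before-unlock idiom, again with illustrative names and liburcu:

#include <urcu.h>

struct dev_info {
        unsigned long long end_sector;   /* stand-in for dev_info_t */
};

/* Illustrative two-device table standing in for the RCU-protected conf. */
static struct dev_info table[2] = { { 1024 }, { 2048 } };

static struct dev_info *lookup(unsigned long long sector)
{
        /* stand-in for which_dev(); only valid inside rcu_read_lock() */
        return &table[sector >= table[0].end_sector];
}

unsigned long long sectors_to_boundary(unsigned long long sector)
{
        unsigned long long end_sector;

        rcu_read_lock();
        end_sector = lookup(sector)->end_sector; /* copy out while safe */
        rcu_read_unlock();

        /* the dev_info may be freed from here on; use only the copy */
        return end_sector - sector;
}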