author    | Jens Axboe <axboe@kernel.dk> | 2020-07-20 15:38:23 -0600
committer | Jens Axboe <axboe@kernel.dk> | 2020-07-20 15:38:23 -0600
commit    | 9caaa66c918c020fd16e84d1c6ebcce9960df1b2 (patch)
tree      | 578d455a53453a1aead77da399c5134e73224a66 /drivers/md/raid1.c
parent    | ba47d845d715a010f7b51f6f89bae32845e6acb7 (diff)
parent    | ef45fe470e1e5410db4af87abc5d5055427945ac (diff)
Merge branch 'for-5.9/block' into for-5.9/block-merge
* for-5.9/block: (124 commits)
blk-cgroup: show global disk stats in root cgroup io.stat
blk-cgroup: make iostat functions visible to stat printing
block: improve discard bio alignment in __blkdev_issue_discard()
block: change REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL to be odd numbers
block: defer flush request no matter whether we have elevator
block: make blk_timeout_init() static
block: remove retry loop in ioc_release_fn()
block: remove unnecessary ioc nested locking
block: integrate bd_start_claiming into __blkdev_get
block: use bd_prepare_to_claim directly in the loop driver
block: refactor bd_start_claiming
block: simplify the restart case in __blkdev_get
Revert "blk-rq-qos: remove redundant finish_wait to rq_qos_wait."
block: always remove partitions from blk_drop_partitions()
block: relax jiffies rounding for timeouts
blk-mq: remove redundant validation in __blk_mq_end_request()
blk-mq: Remove unnecessary local variable
writeback: remove bdi->congested_fn
writeback: remove struct bdi_writeback_congested
writeback: remove {set,clear}_wb_congested
...
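Note: the drivers/md/raid1.c churn shown below comes from two of the changes above: the writeback series that removes the bdi congestion callbacks, and the tree-wide rename of generic_make_request() to submit_bio_noacct(). A minimal sketch of what the rename means for a stacking driver, assuming 5.9+ headers; the helper example_pass_down() and its 'lower' argument are hypothetical, not code from this merge:

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical stacking-driver helper: retarget a bio at a lower
 * device and send it down the submission path without accounting.
 * The only change 5.9 asks of such callers is the new function name.
 */
static void example_pass_down(struct bio *bio, struct block_device *lower)
{
	bio_set_dev(bio, lower);	/* remap to the underlying device */
	submit_bio_noacct(bio);		/* was: generic_make_request(bio) */
}
```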
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r-- | drivers/md/raid1.c | 45
1 file changed, 7 insertions(+), 38 deletions(-)
```diff
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index dcd27f3da84e..960d854c07f8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -786,36 +786,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	return best_disk;
 }
 
-static int raid1_congested(struct mddev *mddev, int bits)
-{
-	struct r1conf *conf = mddev->private;
-	int i, ret = 0;
-
-	if ((bits & (1 << WB_async_congested)) &&
-	    conf->pending_count >= max_queued_requests)
-		return 1;
-
-	rcu_read_lock();
-	for (i = 0; i < conf->raid_disks * 2; i++) {
-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct request_queue *q = bdev_get_queue(rdev->bdev);
-
-			BUG_ON(!q);
-
-			/* Note the '|| 1' - when read_balance prefers
-			 * non-congested targets, it can be removed
-			 */
-			if ((bits & (1 << WB_async_congested)) || 1)
-				ret |= bdi_congested(q->backing_dev_info, bits);
-			else
-				ret &= bdi_congested(q->backing_dev_info, bits);
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 {
 	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
@@ -834,7 +804,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 			/* Just ignore it */
 			bio_endio(bio);
 		else
-			generic_make_request(bio);
+			submit_bio_noacct(bio);
 		bio = next;
 		cond_resched();
 	}
@@ -1312,7 +1282,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 		struct bio *split = bio_split(bio, max_sectors,
 					      gfp, &conf->bio_split);
 		bio_chain(split, bio);
-		generic_make_request(bio);
+		submit_bio_noacct(bio);
 		bio = split;
 		r1_bio->master_bio = bio;
 		r1_bio->sectors = max_sectors;
@@ -1338,7 +1308,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 		trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
 				disk_devt(mddev->gendisk), r1_bio->sector);
 
-	generic_make_request(read_bio);
+	submit_bio_noacct(read_bio);
 }
 
 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1483,7 +1453,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		struct bio *split = bio_split(bio, max_sectors,
 					      GFP_NOIO, &conf->bio_split);
 		bio_chain(split, bio);
-		generic_make_request(bio);
+		submit_bio_noacct(bio);
 		bio = split;
 		r1_bio->master_bio = bio;
 		r1_bio->sectors = max_sectors;
@@ -2240,7 +2210,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		atomic_inc(&r1_bio->remaining);
 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
-		generic_make_request(wbio);
+		submit_bio_noacct(wbio);
 	}
 
 	put_sync_write_buf(r1_bio, 1);
@@ -2926,7 +2896,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 				md_sync_acct_bio(bio, nr_sectors);
 				if (read_targets == 1)
 					bio->bi_opf &= ~MD_FAILFAST;
-				generic_make_request(bio);
+				submit_bio_noacct(bio);
 			}
 		}
 	} else {
@@ -2935,7 +2905,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		md_sync_acct_bio(bio, nr_sectors);
 		if (read_targets == 1)
 			bio->bi_opf &= ~MD_FAILFAST;
-		generic_make_request(bio);
+		submit_bio_noacct(bio);
 	}
 	return nr_sectors;
 }
@@ -3396,7 +3366,6 @@ static struct md_personality raid1_personality =
 	.check_reshape	= raid1_reshape,
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
-	.congested	= raid1_congested,
 };
 
 static int __init raid_init(void)
```
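For readers skimming the hunks: the split-and-requeue pattern that appears in both raid1_read_request() and raid1_write_request() works as follows. This is an annotated restatement of the diff's own code, with gfp, conf, max_sectors, and r1_bio taken from the surrounding functions:

```c
if (max_sectors < bio_sectors(bio)) {
	/* Carve off the first max_sectors; 'bio' becomes the remainder. */
	struct bio *split = bio_split(bio, max_sectors,
				      gfp, &conf->bio_split);
	/* Chain them: 'bio' will not complete until 'split' has. */
	bio_chain(split, bio);
	/* Requeue the remainder; it re-enters the md submission path later. */
	submit_bio_noacct(bio);
	/* Service the front piece now. */
	bio = split;
	r1_bio->master_bio = bio;
	r1_bio->sectors = max_sectors;
}
```

The write path does the same with GFP_NOIO; in both cases the chained remainder keeps the original bio's completion and error propagation intact while the array services each piece separately.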