author     Linus Torvalds <torvalds@linux-foundation.org>  2021-07-09 12:05:33 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-07-09 12:05:33 -0700
commit     a022f7d575bb68c35be0a9ea68860411dec652fe
tree       1860cd383d6ee291849c652ced784dd3d9acf357 /block
parent     3de62951a5bee5dce5f4ffab8b7323ca9d3c7e1c
parent     a731763fc479a9c64456e0643d0ccf64203100c9
Merge tag 'block-5.14-2021-07-08' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
"A combination of changes that ended up depending on both the driver
and core branch (and/or the IDE removal), and a few late arriving
fixes. In detail:
- Fix io ticks wrap-around issue (Chunguang)
- nvme-tcp sock locking fix (Maurizio)
- s390-dasd fixes (Kees, Christoph)
- blk_execute_rq polling support (Keith)
- blk-cgroup RCU iteration fix (Yu)
- nbd backend ID addition (Prasanna)
- Partition deletion fix (Yufen)
- Use blk_mq_alloc_disk for mmc, mtip32xx, ubd (Christoph)
- Removal of now dead block request types due to IDE removal
(Christoph)
- Loop probing and control device cleanups (Christoph)
- Device uevent fix (Christoph)
- Misc cleanups/fixes (Tetsuo, Christoph)"
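One caller-visible API change worth noting: blk_execute_rq() now returns a blk_status_t and polls for completion when the request sits on a poll-type hardware queue (see the blk-exec.c hunk in the diff below). A minimal caller sketch, assuming a driver-private passthrough request; the function name and timeout are hypothetical, the block-layer calls themselves are real:

#include <linux/blkdev.h>

/*
 * Illustration only: allocate a driver-private passthrough request,
 * execute it synchronously and propagate the blk_status_t that
 * blk_execute_rq() now returns.  If the request was allocated on a
 * poll-type hardware queue, blk_execute_rq() polls instead of sleeping.
 */
static int example_submit_sync(struct gendisk *disk, struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = 30 * HZ;			/* arbitrary example timeout */
	status = blk_execute_rq(disk, rq, 0);	/* at_head == 0 */
	blk_put_request(rq);

	return blk_status_to_errno(status);
}

This is roughly the shape the nvme passthrough path takes after the blk_execute_rq commits listed below.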
* tag 'block-5.14-2021-07-08' of git://git.kernel.dk/linux-block: (34 commits)
blk-cgroup: prevent rcu_sched detected stalls warnings while iterating blkgs
block: fix the problem of io_ticks becoming smaller
nvme-tcp: can't set sk_user_data without write_lock
loop: remove unused variable in loop_set_status()
block: remove the bdgrab in blk_drop_partitions
block: grab a device refcount in disk_uevent
s390/dasd: Avoid field over-reading memcpy()
dasd: unexport dasd_set_target_state
block: check disk exist before trying to add partition
ubd: remove dead code in ubd_setup_common
nvme: use return value from blk_execute_rq()
block: return errors from blk_execute_rq()
nvme: use blk_execute_rq() for passthrough commands
block: support polling through blk_execute_rq
block: remove REQ_OP_SCSI_{IN,OUT}
block: mark blk_mq_init_queue_data static
loop: rewrite loop_exit using idr_for_each_entry
loop: split loop_lookup
loop: don't allow deleting an unspecified loop device
loop: move loop_ctl_mutex locking into loop_add
...
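A note on the io_ticks fix ("block: fix the problem of io_ticks becoming smaller"): update_io_ticks() now compares the cached stamp with time_after() instead of a plain inequality, so io_ticks is only charged when the current jiffies value is genuinely later than the stored stamp, even across a counter wrap. A small, self-contained illustration of that comparison; the macro below is a simplified copy of the kernel's time_after() (without the typecheck()) and the values are made up:

#include <stdio.h>

/* Simplified time_after(a, b): true if a is later than b, wrap-safe. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long stamp = (unsigned long)-10;	/* just before the counter wraps */
	unsigned long now = 5;				/* just after the wrap */

	/* now really is later than stamp, even though now < stamp numerically */
	printf("wrapped: time_after(now, stamp) = %d\n", time_after(now, stamp));

	/* a racing updater already pushed the stamp past now: no update */
	stamp = 7;
	now = 5;
	printf("stale:   time_after(now, stamp) = %d\n", time_after(now, stamp));
	return 0;
}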
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c        15
-rw-r--r--  block/blk-core.c           4
-rw-r--r--  block/blk-exec.c          25
-rw-r--r--  block/blk-mq.c             3
-rw-r--r--  block/bsg-lib.c            2
-rw-r--r--  block/bsg.c                2
-rw-r--r--  block/genhd.c              4
-rw-r--r--  block/partitions/core.c   29
-rw-r--r--  block/scsi_ioctl.c         6
9 files changed, 63 insertions, 27 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7b06a5fa3cac..575d7a2e7203 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -56,6 +56,8 @@ static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
 bool blkcg_debug_stats = false;
 static struct workqueue_struct *blkcg_punt_bio_wq;
 
+#define BLKG_DESTROY_BATCH_SIZE 64
+
 static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
 {
@@ -422,7 +424,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkcg_gq *blkg, *n;
+	int count = BLKG_DESTROY_BATCH_SIZE;
 
+restart:
 	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
@@ -430,6 +434,17 @@ static void blkg_destroy_all(struct request_queue *q)
 		spin_lock(&blkcg->lock);
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
+
+		/*
+		 * in order to avoid holding the spin lock for too long, release
+		 * it when a batch of blkgs are destroyed.
+		 */
+		if (!(--count)) {
+			count = BLKG_DESTROY_BATCH_SIZE;
+			spin_unlock_irq(&q->queue_lock);
+			cond_resched();
+			goto restart;
+		}
 	}
 
 	q->root_blkg = NULL;
diff --git a/block/blk-core.c b/block/blk-core.c
index 514838ccab2d..04477697ee4b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -142,8 +142,6 @@ static const char *const blk_op_name[] = {
 	REQ_OP_NAME(ZONE_APPEND),
 	REQ_OP_NAME(WRITE_SAME),
 	REQ_OP_NAME(WRITE_ZEROES),
-	REQ_OP_NAME(SCSI_IN),
-	REQ_OP_NAME(SCSI_OUT),
 	REQ_OP_NAME(DRV_IN),
 	REQ_OP_NAME(DRV_OUT),
 };
@@ -1243,7 +1241,7 @@ static void update_io_ticks(struct block_device *part, unsigned long now,
 	unsigned long stamp;
 again:
 	stamp = READ_ONCE(part->bd_stamp);
-	if (unlikely(stamp != now)) {
+	if (unlikely(time_after(now, stamp))) {
 		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
 			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
 	}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index beae70a0e5e5..d6cd501c0d34 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -21,7 +21,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
 {
 	struct completion *waiting = rq->end_io_data;
 
-	rq->end_io_data = NULL;
+	rq->end_io_data = (void *)(uintptr_t)error;
 
 	/*
 	 * complete last, if this is a stack request the process (and thus
@@ -63,6 +63,19 @@ void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+static bool blk_rq_is_poll(struct request *rq)
+{
+	return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL;
+}
+
+static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
+{
+	do {
+		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
+		cond_resched();
+	} while (!completion_done(wait));
+}
+
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @bd_disk:	matching gendisk
@@ -72,8 +85,9 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
+ * Return: The blk_status_t result provided to blk_mq_end_request().
 */
-void blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
+blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	unsigned long hang_check;
@@ -83,9 +97,14 @@ void blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
 
 	/* Prevent hang_check timer from firing at us during very long I/O */
 	hang_check = sysctl_hung_task_timeout_secs;
-	if (hang_check)
+
+	if (blk_rq_is_poll(rq))
+		blk_rq_poll_completion(rq, &wait);
+	else if (hang_check)
 		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
 	else
 		wait_for_completion_io(&wait);
+
+	return (blk_status_t)(uintptr_t)rq->end_io_data;
 }
 EXPORT_SYMBOL(blk_execute_rq);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2e9fd0ec63d7..2c4ac51e54eb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3109,7 +3109,7 @@ void blk_mq_release(struct request_queue *q)
 	blk_mq_sysfs_deinit(q);
 }
 
-struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
+static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 		void *queuedata)
 {
 	struct request_queue *q;
@@ -3126,7 +3126,6 @@ struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 	}
 	return q;
 }
-EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index cf1811261722..a89d80102304 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -45,7 +45,7 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
 		return PTR_ERR(job->request);
 
 	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
-		job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
+		job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
 		if (IS_ERR(job->bidi_rq)) {
 			ret = PTR_ERR(job->bidi_rq);
 			goto out;
diff --git a/block/bsg.c b/block/bsg.c
index 33e3f82ed03a..1f196563ae6c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -154,7 +154,7 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
 		return ret;
 
 	rq = blk_get_request(q, hdr.dout_xfer_len ?
-			     REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
diff --git a/block/genhd.c b/block/genhd.c
index 79aa40b4c39c..af4d2ab4a633 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -365,12 +365,12 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
 	xa_for_each(&disk->part_tbl, idx, part) {
 		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
 			continue;
-		if (!bdgrab(part))
+		if (!kobject_get_unless_zero(&part->bd_device.kobj))
 			continue;
 
 		rcu_read_unlock();
 		kobject_uevent(bdev_kobj(part), action);
-		bdput(part);
+		put_device(&part->bd_device);
 		rcu_read_lock();
 	}
 	rcu_read_unlock();
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 347c56a51d87..4230d4f71879 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -453,17 +453,26 @@ int bdev_add_partition(struct block_device *bdev, int partno,
 		sector_t start, sector_t length)
 {
 	struct block_device *part;
+	struct gendisk *disk = bdev->bd_disk;
+	int ret;
 
-	mutex_lock(&bdev->bd_disk->open_mutex);
-	if (partition_overlaps(bdev->bd_disk, start, length, -1)) {
-		mutex_unlock(&bdev->bd_disk->open_mutex);
-		return -EBUSY;
+	mutex_lock(&disk->open_mutex);
+	if (!(disk->flags & GENHD_FL_UP)) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (partition_overlaps(disk, start, length, -1)) {
+		ret = -EBUSY;
+		goto out;
 	}
 
-	part = add_partition(bdev->bd_disk, partno, start, length,
+	part = add_partition(disk, partno, start, length,
 			ADDPART_FLAG_NONE, NULL);
-	mutex_unlock(&bdev->bd_disk->open_mutex);
-	return PTR_ERR_OR_ZERO(part);
+	ret = PTR_ERR_OR_ZERO(part);
+out:
+	mutex_unlock(&disk->open_mutex);
+	return ret;
 }
 
 int bdev_del_partition(struct block_device *bdev, int partno)
@@ -537,12 +546,8 @@ void blk_drop_partitions(struct gendisk *disk)
 
 	lockdep_assert_held(&disk->open_mutex);
 
-	xa_for_each_start(&disk->part_tbl, idx, part, 1) {
-		if (!bdgrab(part))
-			continue;
+	xa_for_each_start(&disk->part_tbl, idx, part, 1)
 		delete_partition(part);
-		bdput(part);
-	}
 }
 
 static bool blk_add_partition(struct gendisk *disk,
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index fa6df11b8bdd..d247431a6853 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -313,7 +313,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 		at_head = 1;
 
 	ret = -ENOMEM;
-	rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+	rq = blk_get_request(q, writing ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	req = scsi_req(rq);
@@ -435,7 +435,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
 	}
 
-	rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
+	rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto error_free_buffer;
@@ -524,7 +524,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	struct request *rq;
 	int err;
 
-	rq = blk_get_request(q, REQ_OP_SCSI_OUT, 0);
+	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
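The blk-cgroup.c change above bounds how long the queue lock is held by destroying blkgs in batches of BLKG_DESTROY_BATCH_SIZE and rescheduling in between, which is what stops the rcu_sched stall warnings. The same pattern in isolation, as a sketch with hypothetical structure and function names (only the locking, list, and scheduling primitives are real kernel APIs):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define EXAMPLE_BATCH_SIZE	64

/* Hypothetical container, standing in for the queue/blkg relationship. */
struct example_parent {
	spinlock_t		lock;
	struct list_head	items;
};

struct example_item {
	struct list_head	node;
};

static void example_destroy(struct example_item *item)
{
	list_del(&item->node);
	kfree(item);
}

/*
 * Tear everything down, but never hold the lock for more than one batch:
 * drop it, let the scheduler (and RCU) make progress, then restart the
 * walk from the head of the (now shorter) list.
 */
static void example_destroy_all(struct example_parent *parent)
{
	struct example_item *item, *next;
	int count = EXAMPLE_BATCH_SIZE;

restart:
	spin_lock_irq(&parent->lock);
	list_for_each_entry_safe(item, next, &parent->items, node) {
		example_destroy(item);

		if (!(--count)) {
			count = EXAMPLE_BATCH_SIZE;
			spin_unlock_irq(&parent->lock);
			cond_resched();
			goto restart;
		}
	}
	spin_unlock_irq(&parent->lock);
}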