Diffstat (limited to 'block')
-rw-r--r--  block/blk-lib.c            | 13
-rw-r--r--  block/blk-mq.c             | 35
-rw-r--r--  block/blk-wbt.c            | 13
-rw-r--r--  block/blk-zoned.c          |  4
-rw-r--r--  block/bsg.c                |  3
-rw-r--r--  block/ioctl.c              |  5
-rw-r--r--  block/partition-generic.c  | 14
-rw-r--r--  block/partitions/ibm.c     |  2
-rw-r--r--  block/scsi_ioctl.c         |  5
9 files changed, 59 insertions(+), 35 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index ed89c8f4b2a0..f8c82a9b4012 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -301,13 +301,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- if (discard) {
- ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
- BLKDEV_DISCARD_ZERO, biop);
- if (ret == 0 || (ret && ret != -EOPNOTSUPP))
- goto out;
- }
-
ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
biop);
if (ret == 0 || (ret && ret != -EOPNOTSUPP))
@@ -370,6 +363,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
struct bio *bio = NULL;
struct blk_plug plug;
+ if (discard) {
+ if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+ BLKDEV_DISCARD_ZERO))
+ return 0;
+ }
+
blk_start_plug(&plug);
ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
&bio, discard);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d79fdc11b1ee..c3400b5444a7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
{
LIST_HEAD(rq_list);
- LIST_HEAD(driver_list);
if (unlikely(blk_mq_hctx_stopped(hctx)))
return;
@@ -1605,7 +1604,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
INIT_LIST_HEAD(&tags->page_list);
tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
set->numa_node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
@@ -1631,7 +1630,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
do {
page = alloc_pages_node(set->numa_node,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
break;
@@ -1652,7 +1651,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
* Allow kmemleak to scan these pages as they contain pointers
* to additional allocations like via ops->init_request().
*/
- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
+ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
entries_per_page = order_to_size(this_order) / rq_size;
to_do = min(entries_per_page, set->queue_depth - i);
left -= to_do * rq_size;
@@ -1870,7 +1869,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
static void blk_mq_map_swqueue(struct request_queue *q,
const struct cpumask *online_mask)
{
- unsigned int i;
+ unsigned int i, hctx_idx;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
@@ -1893,6 +1892,21 @@ static void blk_mq_map_swqueue(struct request_queue *q,
if (!cpumask_test_cpu(i, online_mask))
continue;
+ hctx_idx = q->mq_map[i];
+		/* an unmapped hw queue can be remapped after the CPU topology changes */
+ if (!set->tags[hctx_idx]) {
+ set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
+
+		/*
+		 * If tags initialization fails for some hctx,
+		 * that hctx won't be brought online. In this
+		 * case, remap the current ctx to hctx[0], which
+		 * is guaranteed to always have tags allocated.
+		 */
+ if (!set->tags[hctx_idx])
+ q->mq_map[i] = 0;
+ }
+
ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = blk_mq_map_queue(q, i);
@@ -1909,7 +1923,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
* disable it and free the request entries.
*/
if (!hctx->nr_ctx) {
- if (set->tags[i]) {
+		/* Never unmap queue 0. We need it as a
+		 * fallback in case a new remap fails to
+		 * allocate tags.
+		 */
+ if (i && set->tags[i]) {
blk_mq_free_rq_map(set, set->tags[i], i);
set->tags[i] = NULL;
}
@@ -1917,9 +1935,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
continue;
}
- /* unmapped hw queue can be remapped after CPU topo changed */
- if (!set->tags[i])
- set->tags[i] = blk_mq_init_rq_map(set, i);
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);
@@ -2553,7 +2568,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
* This will be replaced with the stats tracking code, using
* 'avg_completion_time / 2' as the pre-sleep target.
*/
- kt = ktime_set(0, nsecs);
+ kt = nsecs;
mode = HRTIMER_MODE_REL;
hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6e82769f4042..f0a9c07b4c7a 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
* the timer to kick off queuing again.
*/
static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+ __releases(lock)
+ __acquires(lock)
{
struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
if (may_queue(rwb, rqw, &wait, rw))
break;
- if (lock)
+ if (lock) {
spin_unlock_irq(lock);
-
- io_schedule();
-
- if (lock)
+ io_schedule();
spin_lock_irq(lock);
+ } else
+ io_schedule();
} while (1);
finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
*/
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
{
unsigned int ret = 0;
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 472211fa183a..3bd15d8095b1 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -16,7 +16,7 @@
static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector)
{
- sector_t zone_mask = blk_queue_zone_size(q) - 1;
+ sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
return sector & ~zone_mask;
}
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
return -EINVAL;
/* Check alignment (handle eventual smaller last zone) */
- zone_sectors = blk_queue_zone_size(q);
+ zone_sectors = blk_queue_zone_sectors(q);
if (sector & (zone_sectors - 1))
return -EINVAL;
diff --git a/block/bsg.c b/block/bsg.c
index 8a05a404ae70..a57046de2f07 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
dprintk("%s: write %Zd bytes\n", bd->name, count);
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
bsg_set_block(bd, file);
bytes_written = 0;
diff --git a/block/ioctl.c b/block/ioctl.c
index f856963204f4..be7f4de3eb3c 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -8,7 +8,7 @@
#include <linux/fs.h>
#include <linux/blktrace_api.h>
#include <linux/pr.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
{
@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
|| pstart < 0 || plength < 0 || partno > 65535)
return -EINVAL;
}
+ /* check if partition is aligned to blocksize */
+ if (p.start & (bdev_logical_block_size(bdev) - 1))
+ return -EINVAL;
mutex_lock(&bdev->bd_mutex);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d7beb6bbbf66..7afb9907821f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
struct block_device *bdev,
sector_t from, sector_t size)
{
- unsigned int zone_size = bdev_zone_size(bdev);
+ unsigned int zone_sectors = bdev_zone_sectors(bdev);
/*
* If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
* regular block devices (no zone operation) and their zone size will
* be reported as 0. Allow this case.
*/
- if (!zone_size)
+ if (!zone_sectors)
return true;
/*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
* use it. Check the zone size too: it should be a power of 2 number
* of sectors.
*/
- if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+ if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
u32 rem;
- div_u64_rem(from, zone_size, &rem);
+ div_u64_rem(from, zone_sectors, &rem);
if (rem)
return false;
if ((from + size) < get_capacity(disk)) {
- div_u64_rem(size, zone_size, &rem);
+ div_u64_rem(size, zone_sectors, &rem);
if (rem)
return false;
}
} else {
- if (from & (zone_size - 1))
+ if (from & (zone_sectors - 1))
return false;
if ((from + size) < get_capacity(disk) &&
- (size & (zone_size - 1)))
+ (size & (zone_sectors - 1)))
return false;
}
diff --git a/block/partitions/ibm.c b/block/partitions/ibm.c
index 47a61474e795..14b081af8d61 100644
--- a/block/partitions/ibm.c
+++ b/block/partitions/ibm.c
@@ -10,7 +10,7 @@
#include <linux/slab.h>
#include <asm/dasd.h>
#include <asm/ebcdic.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/vtoc.h>
#include "check.h"
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 0774799942e0..c2b64923ab66 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/times.h>
#include <linux/uio.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(WRITE_16, filter->write_ok);
__set_bit(WRITE_LONG, filter->write_ok);
__set_bit(WRITE_LONG_2, filter->write_ok);
+ __set_bit(WRITE_SAME, filter->write_ok);
+ __set_bit(WRITE_SAME_16, filter->write_ok);
+ __set_bit(WRITE_SAME_32, filter->write_ok);
__set_bit(ERASE, filter->write_ok);
__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
__set_bit(MODE_SELECT, filter->write_ok);