From 6e39b69e7ea9205c5f80aeac3ef999ab8fb1a4cc Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Fri, 11 Nov 2005 05:30:24 -0600
Subject: [SCSI] export blk layer functions needed for blk_execute_rq_nowait

To send async requests we need these two functions exported.

Signed-off-by: Mike Christie
Signed-off-by: James Bottomley
---
 block/ll_rw_blk.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'block')

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 99c9ca6d599..c525b5a2b59 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2306,6 +2306,8 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	generic_unplug_device(q);
 }
 
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q:		queue to insert the request in
@@ -2444,7 +2446,7 @@ void disk_round_stats(struct gendisk *disk)
 /*
  * queue lock must be held
  */
-static void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(request_queue_t *q, struct request *req)
 {
 	struct request_list *rl = req->rl;
 
@@ -2473,6 +2475,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	}
 }
 
+EXPORT_SYMBOL_GPL(__blk_put_request);
+
 void blk_put_request(struct request *req)
 {
 	unsigned long flags;
--
cgit v1.2.3
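For context on how the two exports are meant to be used together: a submitter queues a REQ_BLOCK_PC request without waiting, and the completion callback (which the block layer invokes with the queue lock held) frees the request with the lock-requiring __blk_put_request(). The following is a minimal sketch against the 2.6.15-era API of this patch; the names my_async_done and my_send_scsi_async are hypothetical, not part of the patch.

#include <linux/blkdev.h>
#include <linux/string.h>

/*
 * Hypothetical completion callback.  rq->end_io is invoked with the
 * queue lock held, which is why the lock-requiring __blk_put_request()
 * (rather than blk_put_request()) is usable here -- and why it needed
 * exporting.  In this era rq_end_io_fn takes only the request.
 */
static void my_async_done(struct request *req)
{
	/* inspect req->errors / req->sense here if needed */
	__blk_put_request(req->q, req);
}

/* Hypothetical fire-and-forget SCSI passthrough submission. */
static int my_send_scsi_async(request_queue_t *q, struct gendisk *disk,
			      unsigned char *cdb, unsigned int cdb_len)
{
	struct request *req;

	req = blk_get_request(q, READ, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
	req->cmd_len = cdb_len;
	memcpy(req->cmd, cdb, cdb_len);
	req->timeout = 30 * HZ;

	/* queue at the tail and return; my_async_done runs on completion */
	blk_execute_rq_nowait(q, disk, req, 0, my_async_done);
	return 0;
}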
From defd94b75409b983f94548ea2f52ff5787ddb848 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Mon, 5 Dec 2005 02:37:06 -0600
Subject: [SCSI] separate max_sectors from max_hw_sectors

- export __blk_put_request and blk_execute_rq_nowait, needed for async
  REQ_BLOCK_PC requests
- separate max_hw_sectors and max_sectors for the block/scsi_ioctl.c and
  SG_IO bio.c helpers, per Jens's last comments. Since block/scsi_ioctl.c
  SG_IO was already testing against max_sectors, and SCSI-ml was setting
  max_sectors and max_hw_sectors to the same value, this does not change
  any SCSI SG_IO behavior. It only prepares ll_rw_blk.c, scsi_ioctl.c and
  bio.c for when SCSI-ml begins to set a valid max_hw_sectors for all
  LLDs. Today, if an LLD does not set it, SCSI-ml sets it to a safe
  default, and some LLDs set it to an artificially low value to work
  around memory and feedback issues.

Note: since we now cap max_sectors to BLK_DEF_MAX_SECTORS, which is 1024,
drivers that used to call blk_queue_max_sectors with a larger value will
now see fs requests capped to BLK_DEF_MAX_SECTORS.

Signed-off-by: Mike Christie
Signed-off-by: James Bottomley
---
 block/ll_rw_blk.c       | 34 ++++++++++++++++++++++++++--------
 block/scsi_ioctl.c      |  2 +-
 drivers/md/dm-table.c   |  2 +-
 drivers/scsi/scsi_lib.c |  2 +-
 fs/bio.c                | 20 +++++++++++---------
 include/linux/blkdev.h  |  3 ++-
 6 files changed, 42 insertions(+), 21 deletions(-)

(limited to 'block')

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c525b5a2b59..d4beb9a89ee 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, MAX_SECTORS);
+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = q->max_hw_sectors = max_sectors;
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = q->max_sectors = max_sectors;
+	else {
+		q->max_sectors = BLK_DEF_MAX_SECTORS;
+		q->max_hw_sectors = max_sectors;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = t->max_hw_sectors =
-		min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 			    struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	struct bio *bio;
 	int reading;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
 	struct bio *bio;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
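The capping logic in the blk_queue_max_sectors() hunk above is easy to misread, so here is a small standalone userspace mirror of it (an illustration, not kernel code) that makes the resulting (max_sectors, max_hw_sectors) pairs concrete:

#include <stdio.h>

#define BLK_DEF_MAX_SECTORS 1024

/* Mirrors the new blk_queue_max_sectors() capping logic. */
static void cap_sectors(unsigned short requested,
			unsigned short *max_sectors,
			unsigned short *max_hw_sectors)
{
	if (BLK_DEF_MAX_SECTORS > requested) {
		*max_hw_sectors = *max_sectors = requested;
	} else {
		*max_sectors = BLK_DEF_MAX_SECTORS;	/* fs requests capped */
		*max_hw_sectors = requested;		/* REQ_BLOCK_PC limit */
	}
}

int main(void)
{
	unsigned short ms, mhs;

	cap_sectors(2048, &ms, &mhs);	/* LLD with a large hardware limit */
	printf("requested 2048 -> max_sectors %hu, max_hw_sectors %hu\n", ms, mhs);

	cap_sectors(128, &ms, &mhs);	/* LLD with a small hardware limit */
	printf("requested  128 -> max_sectors %hu, max_hw_sectors %hu\n", ms, mhs);
	return 0;
}

Output: a request for 2048 becomes (1024, 2048) -- fs I/O is capped while passthrough may still use the full hardware limit -- and 128 stays (128, 128).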
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 382dea7b224..4e390dfd315 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	if (hdr->dxfer_len > (q->max_sectors << 9))
+	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a6d3baa46f6..a6f2dc66c3d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -638,7 +638,7 @@ int dm_split_args(int *argc, char ***argvp, char *input)
 static void check_for_valid_limits(struct io_restrictions *rs)
 {
 	if (!rs->max_sectors)
-		rs->max_sectors = MAX_SECTORS;
+		rs->max_sectors = SAFE_MAX_SECTORS;
 	if (!rs->max_phys_segments)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 54a72f19748..14ad2a785a3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -462,6 +462,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	req = blk_get_request(sdev->request_queue, write, gfp);
 	if (!req)
 		goto free_sense;
+	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
 
 	if (use_sg)
 		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
@@ -477,7 +478,6 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	req->sense_len = 0;
 	req->timeout = timeout;
 	req->retries = retries;
-	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
 	req->end_io_data = sioc;
 
 	sioc->data = privdata;
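The scsi_lib.c hunk above appears to be an ordering fix rather than a cleanup: REQ_BLOCK_PC has to be on the request before the buffer is mapped, because the merge helpers that run while bios are appended now test blk_pc_request() to decide whether to enforce max_sectors or max_hw_sectors. A hedged sketch of the pattern, using the 2.6.15-era API; my_map_pc_buffer is a hypothetical helper, not from the patch:

/*
 * Set the passthrough flag first, as scsi_execute_async() now does, so
 * that any length/merge checks performed during mapping see
 * blk_pc_request(req) == true and test against max_hw_sectors rather
 * than the (possibly lower) max_sectors.
 */
static int my_map_pc_buffer(request_queue_t *q, struct request *req,
			    void *buf, unsigned int len)
{
	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
	return blk_rq_map_kern(q, req, buf, len, GFP_KERNEL);
}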
diff --git a/fs/bio.c b/fs/bio.c
index 4d21ee3873e..38d3e8023a0 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -313,7 +313,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset)
+			  *page, unsigned int len, unsigned int offset,
+			  unsigned short max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -327,7 +328,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+	if (((bio->bi_size + len) >> 9) > max_sectors)
 		return 0;
 
 	/*
@@ -401,7 +402,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset);
+	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
 }
 
 /**
@@ -420,8 +421,8 @@ int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
-	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-			      len, offset);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
 struct bio_map_data {
@@ -533,7 +534,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
 			ret = -EINVAL;
 			break;
 		}
@@ -647,7 +648,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 			/*
 			 * sorry...
 			 */
-			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+			    bytes)
 				break;
 
 			len -= bytes;
@@ -820,8 +822,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
-				   offset) < bytes)
+		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+				    offset) < bytes)
 			break;
 
 		data += bytes;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 509e9a03a32..a18500d196e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -702,7 +702,8 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
-#define MAX_SECTORS 255
+#define SAFE_MAX_SECTORS 255
+#define BLK_DEF_MAX_SECTORS 1024
 
 #define MAX_SEGMENT_SIZE	65536
--
cgit v1.2.3
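From userspace, the visible effect of the series is on SG_IO sizing: sg_io() now validates hdr->dxfer_len against max_hw_sectors instead of the newly capped max_sectors, so passthrough transfers larger than BLK_DEF_MAX_SECTORS remain possible where the hardware allows them. A minimal sketch of such an ioctl on a block device node; the device path is a placeholder and 512-byte logical blocks are assumed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[10] = { 0x28 };	/* READ(10), LBA 0 */
	unsigned char buf[4096];		/* 8 x 512-byte blocks */
	unsigned char sense[32];
	struct sg_io_hdr hdr;
	int fd;

	cdb[8] = sizeof(buf) / 512;		/* transfer length in blocks */

	fd = open("/dev/sda", O_RDWR);		/* placeholder device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = sizeof(buf);	/* now checked against max_hw_sectors << 9 */
	hdr.dxferp = buf;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 20000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	return 0;
}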