Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 26 | +++++++++++++++++---------
1 file changed, 17 insertions, 9 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7fbaa910334..0b68044e7ab 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -195,7 +195,7 @@ struct request_pm_state
 #include <linux/elevator.h>

 typedef void (request_fn_proc) (struct request_queue *q);
-typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

@@ -680,6 +680,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  struct scsi_ioctl_command __user *);

+extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
+
 /*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
@@ -863,16 +865,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);

 /*
- * Note: Code in between changing the blk_plug list/cb_list or element of such
- * lists is preemptable, but such code can't do sleep (or be very careful),
- * otherwise data is corrupted. For details, please check schedule() where
- * blk_schedule_flush_plug() is called.
+ * blk_plug permits building a queue of related requests by holding the I/O
+ * fragments for a short period. This allows merging of sequential requests
+ * into single larger request. As the requests are moved from a per-task list to
+ * the device's request_queue in a batch, this results in improved scalability
+ * as the lock contention for request_queue lock is reduced.
+ *
+ * It is ok not to disable preemption when adding the request to the plug list
+ * or when attempting a merge, because blk_schedule_flush_list() will only flush
+ * the plug list when the task sleeps by itself. For details, please see
+ * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic;
-	struct list_head list;
-	struct list_head cb_list;
-	unsigned int should_sort;
+	unsigned long magic; /* detect uninitialized use-cases */
+	struct list_head list; /* requests */
+	struct list_head cb_list; /* md requires an unplug callback */
+	unsigned int should_sort; /* list to be sorted before flushing? */
 };

 #define BLK_MAX_REQUEST_COUNT 16
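
The rewritten blk_plug comment above describes the plugging mechanism only in general terms. As a concrete illustration, here is a minimal sketch, not part of this patch, of how a submitter batches bios under an on-stack plug using the API declared in this header; submit_batch(), my_bios and nr are hypothetical names, and submit_bio() is shown with the two-argument form used by kernels of this era.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical helper: submit a batch of prepared read bios under one plug. */
static void submit_batch(struct bio **my_bios, int nr)
{
	struct blk_plug plug;
	int i;

	/*
	 * Bios submitted between blk_start_plug() and blk_finish_plug()
	 * are queued on a per-task list (current->plug), giving the block
	 * layer a chance to merge adjacent requests before they are moved
	 * to the device's request_queue in one batch.
	 */
	blk_start_plug(&plug);

	for (i = 0; i < nr; i++)
		submit_bio(READ, my_bios[i]);

	/*
	 * Hand the plugged requests to the driver. If the task sleeps
	 * before this point, schedule() flushes the plug itself via
	 * blk_schedule_flush_plug(), which is why the plug list can be
	 * built without disabling preemption.
	 */
	blk_finish_plug(&plug);
}

Note also that with make_request_fn returning void (first hunk above), a driver handling such bios reports the outcome through bio_endio() or by resubmitting a remapped bio itself, rather than through a return value.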