author | Alan D. Brunelle <Alan.Brunelle@hp.com> | 2008-04-29 14:44:19 +0200
---|---|---
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-04-29 14:48:55 +0200
commit | ac9fafa1243640349aa481adf473db283a695766 (patch) |
tree | 155c2371cca8971638d781269f39fa015bc6509c |
parent | d7e3c3249ef23b4617393c69fe464765b4ff1645 (diff) |
block: Skip I/O merges when disabled
The block I/O + elevator + I/O scheduler code spends a lot of time trying
to merge I/Os -- rightfully so under "normal" circumstances. However,
when the incoming I/O stream is known to be /very/ random in nature,
those cycles are wasted.

This patch adds a per-request_queue tunable, exposed as the queue's
"nomerges" sysfs attribute, that (when set) disables merge attempts
(beyond the simple one-hit cache check), freeing up a non-trivial
number of CPU cycles.
Signed-off-by: Alan D. Brunelle <alan.brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
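
Once applied, the knob shows up under the queue's sysfs directory. As a
minimal sketch (not part of the patch itself), the attribute can be flipped
from user space; the path assumes the usual /sys/block/<dev>/queue layout,
with "sda" as a placeholder device name:

```c
#include <stdio.h>

int main(void)
{
	/* queue_nomerges_store() treats any non-zero value as "set
	 * QUEUE_FLAG_NOMERGES"; writing zero clears the flag again. */
	FILE *f = fopen("/sys/block/sda/queue/nomerges", "w");

	if (!f) {
		perror("fopen nomerges");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}
```

Reading the attribute back goes through queue_nomerges_show() and reports
0 or 1.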
-rw-r--r-- | block/blk-sysfs.c | 26
-rw-r--r-- | block/elevator.c | 3
-rw-r--r-- | include/linux/blkdev.h | 2

3 files changed, 31 insertions, 0 deletions
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83be22..e85c4013e8a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+				    size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	if (nm)
+		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+	else
+		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+	return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nomerges_entry.attr,
 	NULL,
 };
diff --git a/block/elevator.c b/block/elevator.c
index 7253fa05db0..ac5310ef827 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		}
 	}
 
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 08df1ea8bac..c09696a90d6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -408,6 +408,7 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
 					   struct request_queue *q)
@@ -476,6 +477,7 @@ enum {
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
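
As a design note, the tunable is an ordinary queue_flags bit, so a driver
that knows its I/O pattern is random could set it at queue-setup time
rather than via sysfs. A sketch under that assumption -- my_request_fn and
my_lock are hypothetical stand-ins for a real driver's request function
and queue lock, not part of this patch:

```c
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical queue lock */

static void my_request_fn(struct request_queue *q)
{
	/* hypothetical request handler, elided */
}

static struct request_queue *my_init_queue(void)
{
	struct request_queue *q = blk_init_queue(my_request_fn, &my_lock);

	/* Known-random workload: skip merge attempts from the start.
	 * The unlocked helper is fine here since the queue is not yet
	 * visible to anyone else. */
	if (q)
		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);
	return q;
}
```

Either path ends at the same blk_queue_nomerges() test in elv_merge(),
which now returns ELEVATOR_NO_MERGE before the hash-lookup backmerge
attempt; the one-hit last_merge check above it still runs, matching the
"beyond the simple one-hit cache check" wording in the message.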