author    Jens Axboe <axboe@suse.de>  2006-03-23 20:00:26 +0100
committer Jens Axboe <axboe@suse.de>  2006-03-23 20:00:26 +0100
commit    2056a782f8e7e65fd4bfd027506b4ce1c5e9ccd4 (patch)
tree      d4fe59a7ca0c110690937085548936a4535c39db /block/ll_rw_blk.c
parent    6dac40a7ce2483a47b54af07afebeb84131c7228 (diff)
[PATCH] Block queue IO tracing support (blktrace) as of 2006-03-23
Signed-off-by: Jens Axboe <axboe@suse.de>
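
Each hook added below is meant to be close to free when tracing is off: the helpers test a per-queue blk_trace pointer and return immediately if no trace is attached to the queue. The companion header this patch pulls in (<linux/blktrace_api.h>) defines them roughly along these lines (a simplified sketch, kernel context assumed, details approximate):

static inline void blk_add_trace_generic(struct request_queue *q,
                                         struct bio *bio, int rw, u32 what)
{
        struct blk_trace *bt = q->blk_trace;    /* NULL unless tracing enabled */

        if (likely(!bt))
                return;                         /* fast path: tracing off */

        if (bio)
                blk_add_trace_bio(q, bio, what);        /* sector/size from bio */
        else
                __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
}

The emitted records are shipped to userspace (via relayfs in this era) and collected and decoded by the blktrace/blkparse utilities that accompany the patch set.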
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c  44
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 6c793b196aa..062067fa7ea 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
 #include <linux/writeback.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
@@ -1556,8 +1557,10 @@ void blk_plug_device(request_queue_t *q)
         if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
                 return;
 
-        if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+        if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
                 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+        }
 }
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1621,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
         /*
          * devices don't necessarily have an ->unplug_fn defined
          */
-        if (q->unplug_fn)
+        if (q->unplug_fn) {
+                blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                        q->rq.count[READ] + q->rq.count[WRITE]);
+
                 q->unplug_fn(q);
+        }
 }
 
 static void blk_unplug_work(void *data)
 {
         request_queue_t *q = data;
 
+        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                q->rq.count[READ] + q->rq.count[WRITE]);
+
         q->unplug_fn(q);
 }
 
@@ -1636,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data)
 {
         request_queue_t *q = (request_queue_t *)data;
 
+        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+                                q->rq.count[READ] + q->rq.count[WRITE]);
+
         kblockd_schedule_work(&q->unplug_work);
 }
 
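
The integer payload attached to these unplug events is q->rq.count[READ] + q->rq.count[WRITE], i.e. the number of requests currently allocated on the queue, so a decoder can show the depth being handed to the driver at unplug time. In the companion header, blk_add_trace_pdu_int() packs that value roughly as follows (sketch, details approximate; the payload is stored big-endian so traces stay portable across hosts):

static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
                                         struct bio *bio, unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;
        __be64 rpdu = cpu_to_be64(pdu);         /* payload logged big-endian */

        if (likely(!bt))
                return;

        if (bio)
                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                what, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        else
                __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
}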
@@ -1753,6 +1766,9 @@ static void blk_release_queue(struct kobject *kobj)
         if (q->queue_tags)
                 __blk_queue_free_tags(q);
 
+        if (q->blk_trace)
+                blk_trace_shutdown(q);
+
         kmem_cache_free(requestq_cachep, q);
 }
 
@@ -2129,6 +2145,8 @@ rq_starved:
 
         rq_init(q, rq);
         rq->rl = rl;
+
+        blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
         return rq;
 }
@@ -2157,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                 if (!rq) {
                         struct io_context *ioc;
 
+                        blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
                         __generic_unplug_device(q);
                         spin_unlock_irq(q->queue_lock);
                         io_schedule();
@@ -2210,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
         if (blk_rq_tagged(rq))
                 blk_queue_end_tag(q, rq);
 
@@ -2844,6 +2866,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                         if (!q->back_merge_fn(q, req, bio))
                                 break;
 
+                        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
                         req->biotail->bi_next = bio;
                         req->biotail = bio;
                         req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2859,6 +2883,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                         if (!q->front_merge_fn(q, req, bio))
                                 break;
 
+                        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
                         bio->bi_next = req->bio;
                         req->bio = bio;
 
@@ -2976,6 +3002,7 @@ void generic_make_request(struct bio *bio)
         request_queue_t *q;
         sector_t maxsector;
         int ret, nr_sectors = bio_sectors(bio);
+        dev_t old_dev;
 
         might_sleep();
         /* Test device or partition size, when known. */
@@ -3002,6 +3029,8 @@ void generic_make_request(struct bio *bio)
          * NOTE: we don't repeat the blk_size check for each new device.
          * Stacking drivers are expected to know what they are doing.
          */
+        maxsector = -1;
+        old_dev = 0;
         do {
                 char b[BDEVNAME_SIZE];
 
@@ -3034,6 +3063,15 @@ end_io:
                  */
                 blk_partition_remap(bio);
 
+                if (maxsector != -1)
+                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+                                            maxsector);
+
+                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+                maxsector = bio->bi_sector;
+                old_dev = bio->bi_bdev->bd_dev;
+
                 ret = q->make_request_fn(q, bio);
         } while (ret);
 }
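
A note on the bookkeeping in the hunk above: maxsector is recycled after the initial size check to mean "sector the bio pointed at on the previous pass", with -1 as the "no previous pass" sentinel, and old_dev likewise remembers the previous device. When a stacking driver (MD, DM) redirects the bio and returns nonzero from ->make_request_fn, the next loop iteration can then emit a BLK_TA_REMAP event carrying both the old and the new location. Condensed to the pattern alone (hypothetical sketch, surrounding checks elided):

        maxsector = -1;                         /* no previous location yet */
        old_dev = 0;
        do {
                /* ... queue lookup, bounds checks, blk_partition_remap() ... */

                if (maxsector != -1)            /* a driver remapped the bio */
                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                            maxsector);

                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);

                maxsector = bio->bi_sector;     /* snapshot location before the */
                old_dev = bio->bi_bdev->bd_dev; /* driver gets a chance to remap */

                ret = q->make_request_fn(q, bio);
        } while (ret);                          /* nonzero: bio redirected, go again */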
@@ -3153,6 +3191,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
         int total_bytes, bio_nbytes, error, next_idx = 0;
         struct bio *bio;
 
+        blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
         /*
          * extend uptodate bool to allow < 0 value to be direct io error
          */