| author | Anton Vorontsov <cbouatmailru@gmail.com> | 2008-07-30 02:05:23 +0400 |
|---|---|---|
| committer | Anton Vorontsov <cbouatmailru@gmail.com> | 2008-07-30 02:05:23 +0400 |
| commit | 9fec6060d9e48ed7db0dac0e16d0f0f0e615b7f6 (patch) | |
| tree | 74b41f31a08f6500ff3dfcf64ba21e2d9a8e87e5 /block/blk-core.c | |
| parent | fece418418f51e92dd7e67e17c5e3fe5a28d3279 (diff) | |
| parent | 6e86841d05f371b5b9b86ce76c02aaee83352298 (diff) | |
Merge branch 'master' of /home/cbou/linux-2.6
Conflicts:
drivers/power/Kconfig
drivers/power/Makefile
Diffstat (limited to 'block/blk-core.c')
| -rw-r--r-- | block/blk-core.c | 101 |
1 file changed, 49 insertions, 52 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index b754a4a2f9b..fef79ccb2a1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
+        struct hd_struct *part;
         int rw = rq_data_dir(rq);
 
         if (!blk_fs_request(rq) || !rq->rq_disk)
                 return;
 
-        if (!new_io) {
-                __all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
-        } else {
-                struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
+        part = get_part(rq->rq_disk, rq->sector);
+        if (!new_io)
+                __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
+        else {
                 disk_round_stats(rq->rq_disk);
                 rq->rq_disk->in_flight++;
                 if (part) {
@@ -142,6 +143,10 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
                 bio->bi_size -= nbytes;
                 bio->bi_sector += (nbytes >> 9);
+
+                if (bio_integrity(bio))
+                        bio_integrity_advance(bio, nbytes);
+
                 if (bio->bi_size == 0)
                         bio_endio(bio, error);
         } else {
@@ -200,8 +205,7 @@ void blk_plug_device(struct request_queue *q)
         if (blk_queue_stopped(q))
                 return;
 
-        if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-                __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
                 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
         }
@@ -216,10 +220,9 @@ int blk_remove_plug(struct request_queue *q)
 {
         WARN_ON(!irqs_disabled());
 
-        if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+        if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
                 return 0;
 
-        queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
         del_timer(&q->unplug_timer);
         return 1;
 }
@@ -253,9 +256,11 @@ EXPORT_SYMBOL(__generic_unplug_device);
  **/
 void generic_unplug_device(struct request_queue *q)
 {
-        spin_lock_irq(q->queue_lock);
-        __generic_unplug_device(q);
-        spin_unlock_irq(q->queue_lock);
+        if (blk_queue_plugged(q)) {
+                spin_lock_irq(q->queue_lock);
+                __generic_unplug_device(q);
+                spin_unlock_irq(q->queue_lock);
+        }
 }
 EXPORT_SYMBOL(generic_unplug_device);
 
@@ -321,8 +326,7 @@ void blk_start_queue(struct request_queue *q)
          * one level of recursion is ok and is much faster than kicking
          * the unplug handling
          */
-        if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-                queue_flag_set(QUEUE_FLAG_REENTER, q);
+        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                 q->request_fn(q);
                 queue_flag_clear(QUEUE_FLAG_REENTER, q);
         } else {
@@ -387,8 +391,7 @@ void __blk_run_queue(struct request_queue *q)
          * handling reinvoke the handler shortly if we already got there.
          */
         if (!elv_queue_empty(q)) {
-                if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-                        queue_flag_set(QUEUE_FLAG_REENTER, q);
+                if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                         q->request_fn(q);
                         queue_flag_clear(QUEUE_FLAG_REENTER, q);
                 } else {
@@ -479,6 +482,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         kobject_init(&q->kobj, &blk_queue_ktype);
 
         mutex_init(&q->sysfs_lock);
+        spin_lock_init(&q->__queue_lock);
 
         return q;
 }
@@ -541,10 +545,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
          * if caller didn't supply a lock, they get per-queue locking with
          * our embedded lock
          */
-        if (!lock) {
-                spin_lock_init(&q->__queue_lock);
+        if (!lock)
                 lock = &q->__queue_lock;
-        }
 
         q->request_fn = rfn;
         q->prep_rq_fn = NULL;
@@ -804,35 +806,32 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
         rq = get_request(q, rw_flags, bio, GFP_NOIO);
         while (!rq) {
                 DEFINE_WAIT(wait);
+                struct io_context *ioc;
                 struct request_list *rl = &q->rq;
 
                 prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                 TASK_UNINTERRUPTIBLE);
 
-                rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-                if (!rq) {
-                        struct io_context *ioc;
+                blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 
-                        blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
-
-                        __generic_unplug_device(q);
-                        spin_unlock_irq(q->queue_lock);
-                        io_schedule();
+                __generic_unplug_device(q);
+                spin_unlock_irq(q->queue_lock);
+                io_schedule();
 
-                        /*
-                         * After sleeping, we become a "batching" process and
-                         * will be able to allocate at least one request, and
-                         * up to a big batch of them for a small period time.
-                         * See ioc_batching, ioc_set_batching
-                         */
-                        ioc = current_io_context(GFP_NOIO, q->node);
-                        ioc_set_batching(q, ioc);
+                /*
+                 * After sleeping, we become a "batching" process and
+                 * will be able to allocate at least one request, and
+                 * up to a big batch of them for a small period time.
+                 * See ioc_batching, ioc_set_batching
+                 */
+                ioc = current_io_context(GFP_NOIO, q->node);
+                ioc_set_batching(q, ioc);
 
-                        spin_lock_irq(q->queue_lock);
-                }
+                spin_lock_irq(q->queue_lock);
                 finish_wait(&rl->wait[rw], &wait);
-        }
+
+                rq = get_request(q, rw_flags, bio, GFP_NOIO);
+        };
 
         return rq;
 }
@@ -1043,15 +1042,9 @@ void blk_put_request(struct request *req)
         unsigned long flags;
         struct request_queue *q = req->q;
 
-        /*
-         * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
-         * following if (q) test.
-         */
-        if (q) {
-                spin_lock_irqsave(q->queue_lock, flags);
-                __blk_put_request(q, req);
-                spin_unlock_irqrestore(q->queue_lock, flags);
-        }
+        spin_lock_irqsave(q->queue_lock, flags);
+        __blk_put_request(q, req);
+        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_put_request);
 
@@ -1382,6 +1375,9 @@ end_io:
                  */
                 blk_partition_remap(bio);
 
+                if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+                        goto end_io;
+
                 if (old_sector != -1)
                         blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
                                             old_sector);
@@ -1536,10 +1532,11 @@ static int __end_that_request_first(struct request *req, int error,
         }
 
         if (blk_fs_request(req) && req->rq_disk) {
+                struct hd_struct *part = get_part(req->rq_disk, req->sector);
                 const int rw = rq_data_dir(req);
 
-                all_stat_add(req->rq_disk, sectors[rw],
-                             nr_bytes >> 9, req->sector);
+                all_stat_add(req->rq_disk, part, sectors[rw],
+                             nr_bytes >> 9, req->sector);
         }
 
         total_bytes = bio_nbytes = 0;
@@ -1725,8 +1722,8 @@ static void end_that_request_last(struct request *req, int error)
                 const int rw = rq_data_dir(req);
                 struct hd_struct *part = get_part(disk, req->sector);
 
-                __all_stat_inc(disk, ios[rw], req->sector);
-                __all_stat_add(disk, ticks[rw], duration, req->sector);
+                __all_stat_inc(disk, part, ios[rw], req->sector);
+                __all_stat_add(disk, part, ticks[rw], duration, req->sector);
                 disk_round_stats(disk);
                 disk->in_flight--;
                 if (part) {
@@ -2045,7 +2042,7 @@ int __init blk_dev_init(void)
         for_each_possible_cpu(i)
                 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
-        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
         register_hotcpu_notifier(&blk_cpu_notifier);
 
         return 0;
