author     Linus Torvalds <torvalds@linux-foundation.org>   2009-12-08 08:19:16 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-12-08 08:19:16 -0800
commit     6035ccd8e9e40bb654fbfdef325902ab531679a5 (patch)
tree       c1810d8a4d4ef150cdf14af72e6087dfc3f4b6e0 /include/linux/blkdev.h
parent     23eb3b64b5e44680c867e165fe1cd18e57fba255 (diff)
parent     878eaddd05d251cefa9632c2b8046833c5eead66 (diff)
Merge branch 'for-2.6.33' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.33' of git://git.kernel.dk/linux-2.6-block: (113 commits)
  cfq-iosched: Do not access cfqq after freeing it
  block: include linux/err.h to use ERR_PTR
  cfq-iosched: use call_rcu() instead of doing grace period stall on queue exit
  blkio: Allow CFQ group IO scheduling even when CFQ is a module
  blkio: Implement dynamic io controlling policy registration
  blkio: Export some symbols from blkio as its user CFQ can be a module
  block: Fix io_context leak after failure of clone with CLONE_IO
  block: Fix io_context leak after clone with CLONE_IO
  cfq-iosched: make nonrot check logic consistent
  io controller: quick fix for blk-cgroup and modular CFQ
  cfq-iosched: move IO controller declerations to a header file
  cfq-iosched: fix compile problem with !CONFIG_CGROUP
  blkio: Documentation
  blkio: Wait on sync-noidle queue even if rq_noidle = 1
  blkio: Implement group_isolation tunable
  blkio: Determine async workload length based on total number of queues
  blkio: Wait for cfq queue to get backlogged if group is empty
  blkio: Propagate cgroup weight updation to cfq groups
  blkio: Drop the reference to queue once the task changes cgroup
  blkio: Provide some isolation between groups
  ...
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 56
1 files changed, 43 insertions, 13 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 221cecd86bd..784a919aa0d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -312,13 +312,17 @@ struct queue_limits {
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
+ unsigned int discard_granularity;
+ unsigned int discard_alignment;
unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;
unsigned char misaligned;
+ unsigned char discard_misaligned;
unsigned char no_cluster;
+ signed char discard_zeroes_data;
};
struct request_queue
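For context on the new discard limits above: a low-level driver that supports discard would typically advertise it by filling in these fields when it configures its queue. The sketch below is illustrative only and assumes a hypothetical driver ("foo") and made-up values; blk_queue_max_discard_sectors() and QUEUE_FLAG_DISCARD are pre-existing block-layer interfaces of this era.

#include <linux/blkdev.h>

/* Hypothetical LLD setup: advertise discard support via the new limits. */
static void foo_disk_setup_discard(struct request_queue *q)
{
	q->limits.discard_granularity = 4096;	/* device erase-block size, bytes */
	q->limits.discard_alignment   = 0;	/* LBA 0 starts an erase block */
	q->limits.discard_zeroes_data = 1;	/* discarded blocks read back as zeroes */

	blk_queue_max_discard_sectors(q, 8 * 1024 * 1024);	/* cap single discards at 4 GiB */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}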
@@ -749,6 +753,17 @@ struct req_iterator {
#define rq_iter_last(rq, _iter) \
(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
+#endif
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+extern void rq_flush_dcache_pages(struct request *rq);
+#else
+static inline void rq_flush_dcache_pages(struct request *rq)
+{
+}
+#endif
+
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
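The rq_flush_dcache_pages() declaration added above is aimed at drivers that move request data by CPU (PIO) and must keep the D-cache coherent on architectures where that matters; on other architectures it is the inline no-op defined in the hunk. A hedged sketch of a caller follows; foo_complete_pio_read() is hypothetical, while rq_data_dir() and blk_end_request_all() are standard block-layer helpers.

#include <linux/blkdev.h>

/*
 * Hypothetical PIO completion path: after the CPU has copied read data
 * into the request's pages, flush the D-cache so later user mappings
 * see the new contents.  Where ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is 0,
 * rq_flush_dcache_pages() compiles away to nothing.
 */
static void foo_complete_pio_read(struct request *rq, int error)
{
	if (rq_data_dir(rq) == READ)
		rq_flush_dcache_pages(rq);

	blk_end_request_all(rq, error);
}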
@@ -823,19 +838,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
return bdev->bd_disk->queue;
}
-static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
- struct page *page)
-{
- if (bdi && bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, page);
-}
-
-static inline void blk_run_address_space(struct address_space *mapping)
-{
- if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info, NULL);
-}
-
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
@@ -1134,6 +1136,34 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
return q->limits.alignment_offset;
}
+static inline int queue_discard_alignment(struct request_queue *q)
+{
+ if (q->limits.discard_misaligned)
+ return -1;
+
+ return q->limits.discard_alignment;
+}
+
+static inline int queue_sector_discard_alignment(struct request_queue *q,
+ sector_t sector)
+{
+ return ((sector << 9) - q->limits.discard_alignment)
+ & (q->limits.discard_granularity - 1);
+}
+
+static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
+{
+ if (q->limits.discard_zeroes_data == 1)
+ return 1;
+
+ return 0;
+}
+
+static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
+{
+ return queue_discard_zeroes_data(bdev_get_queue(bdev));
+}
+
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;
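The discard helpers introduced in the last hunk are read-side accessors for the new limits. As a hedged illustration, a caller such as a filesystem might use them to decide whether a discard can stand in for explicit zeroing and whether a sector falls on a discard boundary; the wrapper below is hypothetical, and only the queue/bdev helpers come from this patch.

#include <linux/types.h>
#include <linux/blkdev.h>

/*
 * Hypothetical caller: return true if discarding 'sector' is guaranteed
 * to leave it reading back as zeroes and the sector is discard-aligned.
 */
static bool foo_discard_zeroes_sector(struct block_device *bdev, sector_t sector)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!bdev_discard_zeroes_data(bdev))
		return false;	/* contents after a discard are undefined */

	/* a non-zero remainder means 'sector' is not on a discard boundary */
	return queue_sector_discard_alignment(q, sector) == 0;
}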