From 4c2593270133708698d4b8cea2dab469479ad13b Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Sun, 29 May 2011 12:52:55 +0100
Subject: dm table: allow targets to support discards internally

Permit a target to support discards regardless of whether or not all its
underlying devices do.

Signed-off-by: Mike Snitzer
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-table.c         | 6 +++++-
 include/linux/device-mapper.h | 6 ++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cb8380c9767..215e112d153 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1346,7 +1346,8 @@ bool dm_table_supports_discards(struct dm_table *t)
 		return 0;
 
 	/*
-	 * Ensure that at least one underlying device supports discards.
+	 * Unless any target used by the table set discards_supported,
+	 * require at least one underlying device to support discards.
 	 * t->devices includes internal dm devices such as mirror logs
 	 * so we need to use iterate_devices here, which targets
 	 * supporting discard must provide.
@@ -1354,6 +1355,9 @@ bool dm_table_supports_discards(struct dm_table *t)
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
+		if (ti->discards_supported)
+			return 1;
+
 		if (ti->type->iterate_devices &&
 		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
 			return 1;

diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 32a4423710f..4427e045405 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -191,6 +191,12 @@ struct dm_target {
 
 	/* Used to provide an error string from the ctr */
 	char *error;
+
+	/*
+	 * Set if this target needs to receive discards regardless of
+	 * whether or not its underlying devices have support.
+	 */
+	unsigned discards_supported:1;
 };
 
 /* Each target can link one of these into the table */
-- cgit v1.2.3
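For illustration, a target that handles discards itself would opt in from its
constructor. A minimal sketch, not part of this series (the function name and
the surrounding target are hypothetical):

	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/*
		 * Hypothetical target that implements discards internally:
		 * setting this bit makes dm_table_supports_discards() accept
		 * discards even if no underlying device supports them.
		 */
		ti->discards_supported = 1;

		return 0;
	}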
From f4808ca99a203f20b4475601748e44b25a65bdec Mon Sep 17 00:00:00 2001
From: Milan Broz
Date: Sun, 29 May 2011 13:02:52 +0100
Subject: dm table: reject devices without request fns

This patch adds a check that a block device has a request function
defined before it is used.  Otherwise, misconfiguration can cause an oops.

Because we are allowing devices with zero size e.g. an offline multipath
device as in commit 2cd54d9bedb79a97f014e86c0da393416b264eb3
("dm: allow offline devices") there needs to be an additional check
to ensure devices are initialised.  Some block devices, like a loop
device without a backing file, exist but have no request function.

Reproducer is trivial: dm-mirror on an unbound loop device
(no backing file on the loop devices)

  dmsetup create x --table "0 8 mirror core 2 8 sync 2 /dev/loop0 0 /dev/loop1 0"

and mirror resync will immediately cause an oops:

  BUG: unable to handle kernel NULL pointer dereference at (null)
   ? generic_make_request+0x2bd/0x590
   ? kmem_cache_alloc+0xad/0x190
   submit_bio+0x53/0xe0
   ? bio_add_page+0x3b/0x50
   dispatch_io+0x1ca/0x210 [dm_mod]
   ? read_callback+0x0/0xd0 [dm_mirror]
   dm_io+0xbb/0x290 [dm_mod]
   do_mirror+0x1e0/0x748 [dm_mirror]

Signed-off-by: Milan Broz
Reported-by: Zdenek Kabelac
Acked-by: Mike Snitzer
Cc: stable@kernel.org
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-table.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 215e112d153..451c3bb176d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
+	struct request_queue *q;
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
 	sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		limits->logical_block_size >> SECTOR_SHIFT;
 	char b[BDEVNAME_SIZE];
 
+	/*
+	 * Some devices exist without request functions,
+	 * such as loop devices not yet bound to backing files.
+	 * Forbid the use of such devices.
+	 */
+	q = bdev_get_queue(bdev);
+	if (!q || !q->make_request_fn) {
+		DMWARN("%s: %s is not yet initialised: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
+	}
+
 	if (!dev_size)
 		return 0;
-- cgit v1.2.3

From 6f13f6fba76edc7d0e7580c5deee829d59a41b2f Mon Sep 17 00:00:00 2001
From: "Martin K. Petersen"
Date: Sun, 29 May 2011 13:02:55 +0100
Subject: dm mpath: do not fail paths after integrity errors

Integrity errors need to be passed to the owner of the integrity
metadata for processing.  Consequently EILSEQ should be passed up the
stack.

Cc: stable@kernel.org
Signed-off-by: Martin K. Petersen
Acked-by: Mike Snitzer
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-mpath.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a550a057d99..aa4e570c2cb 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (!error && !clone->errors)
 		return 0;	/* I/O complete */
 
-	if (error == -EOPNOTSUPP || error == -EREMOTEIO)
+	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
 		return error;
 
 	if (mpio->pgpath)
-- cgit v1.2.3

From a705a34a565a5445bf731bd8006d51ea4d2b4236 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:02:58 +0100
Subject: dm kcopyd: avoid pointless job splitting

Don't split SUB_JOB_SIZE jobs.

If the job size equals SUB_JOB_SIZE, there is no point in splitting it.
Splitting it just unnecessarily wastes time, because the split job size
is SUB_JOB_SIZE too.
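For example, a copy of exactly SUB_JOB_SIZE (128) sectors previously failed
the strict comparison and took the split path, in effect producing a single
sub job of the same 128 sectors. A sketch of the branch after this change:

	if (job->source.count <= SUB_JOB_SIZE)
		dispatch_job(job);	/* a 128-sector job is no longer split */
	else
		split_job(job);		/* only strictly larger jobs are split */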
Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-kcopyd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bb73a13ca4..505b6f5cd38 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -578,7 +578,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 	job->fn = fn;
 	job->context = context;
 
-	if (job->source.count < SUB_JOB_SIZE)
+	if (job->source.count <= SUB_JOB_SIZE)
 		dispatch_job(job);
 
 	else {
-- cgit v1.2.3

From c6ea41fbbe08f270a8edef99dc369faf809d1bd6 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:03:00 +0100
Subject: dm kcopyd: preallocate sub jobs to avoid deadlock

There's a possible theoretical deadlock in dm-kcopyd because multiple
allocations from the same mempool are required to finish a request.
Avoid this by preallocating sub jobs.

There is a mempool of 512 entries.  Each request requires up to 9
entries from the mempool.  If we have at least 57 concurrent requests
running, the mempool may be exhausted and mempool allocations may start
blocking until another entry is freed to the mempool.  Because the same
thread is used to free entries to the mempool and allocate entries from
the mempool, this may result in a deadlock.

This patch changes it so that one mempool entry contains all 9 "struct
kcopyd_job" required to fulfill the whole request.  The allocation is
done only once in dm_kcopyd_copy and no further mempool allocations are
done during request processing.

If dm_kcopyd_copy is not run in the completion thread, this
implementation is deadlock-free.

MIN_JOBS needs reducing accordingly and we've chosen to reduce it
further to 8.

Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-kcopyd.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 505b6f5cd38..24fb42ed7b8 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,6 +27,10 @@
 
 #include "dm.h"
 
+#define SUB_JOB_SIZE	128
+#define SPLIT_COUNT	8
+#define MIN_JOBS	8
+
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
@@ -216,16 +220,17 @@ struct kcopyd_job {
 	struct mutex lock;
 	atomic_t sub_jobs;
 	sector_t progress;
-};
 
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+	struct kcopyd_job *master_job;
+};
 
 static struct kmem_cache *_job_cache;
 
 int __init dm_kcopyd_init(void)
 {
-	_job_cache = KMEM_CACHE(kcopyd_job, 0);
+	_job_cache = kmem_cache_create("kcopyd_job",
+				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+				__alignof__(struct kcopyd_job), 0, NULL);
 	if (!_job_cache)
 		return -ENOMEM;
 
@@ -299,7 +304,12 @@ static int run_complete_job(struct kcopyd_job *job)
 
 	if (job->pages)
 		kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, kc->job_pool);
+
+	/*
+	 * If this is the master job, the sub jobs have already
+	 * completed so we can free everything.
+	 */
+	if (job->master_job == job)
+		mempool_free(job, kc->job_pool);
 
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +470,14 @@ static void dispatch_job(struct kcopyd_job *job)
 	wake(kc);
 }
 
-#define SUB_JOB_SIZE 128
 static void segment_complete(int read_err, unsigned long write_err,
 			     void *context)
 {
 	/* FIXME: tidy this function */
 	sector_t progress = 0;
 	sector_t count = 0;
-	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+	struct kcopyd_job *job = sub_job->master_job;
 	struct dm_kcopyd_client *kc = job->kc;
 
 	mutex_lock(&job->lock);
@@ -498,8 +508,6 @@ static void segment_complete(int read_err, unsigned long write_err,
 
 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
-							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -511,7 +519,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 		}
 
 		sub_job->fn = segment_complete;
-		sub_job->context = job;
+		sub_job->context = sub_job;
 		dispatch_job(sub_job);
 
 	} else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +539,19 @@ static void segment_complete(int read_err, unsigned long write_err,
 }
 
 /*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
  */
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
 {
 	int i;
 
-	atomic_inc(&job->kc->nr_jobs);
+	atomic_inc(&master_job->kc->nr_jobs);
 
-	atomic_set(&job->sub_jobs, SPLIT_COUNT);
-	for (i = 0; i < SPLIT_COUNT; i++)
-		segment_complete(0, 0u, job);
+	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+	for (i = 0; i < SPLIT_COUNT; i++) {
+		master_job[i + 1].master_job = master_job;
+		segment_complete(0, 0u, &master_job[i + 1]);
+	}
 }
 
 int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
@@ -553,7 +561,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 	struct kcopyd_job *job;
 
 	/*
-	 * Allocate a new job.
+	 * Allocate an array of jobs consisting of one master job
+	 * followed by SPLIT_COUNT sub jobs.
 	 */
 	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
@@ -577,10 +586,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 
 	job->fn = fn;
 	job->context = context;
+	job->master_job = job;
 
 	if (job->source.count <= SUB_JOB_SIZE)
 		dispatch_job(job);
-
 	else {
 		mutex_init(&job->lock);
 		job->progress = 0;
-- cgit v1.2.3

From 4cc1b4cffd187a5c5d6264c8d766c49b3c57fb05 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:03:02 +0100
Subject: dm kcopyd: remove superfluous page allocation spinlock

Remove the spinlock protecting the pages allocation.  The spinlock is
only taken on initialization or from the single-threaded workqueue;
therefore, it is useless.

The spinlock is taken in kcopyd_get_pages and kcopyd_put_pages.

kcopyd_get_pages is only called from run_pages_job, which is only
called from process_jobs called from do_work.

kcopyd_put_pages is called from client_alloc_pages (which is an
initialization function) or from run_complete_job.  run_complete_job
is only called from process_jobs called from do_work.

Another spinlock, kc->job_lock, is taken each time someone pushes or
pops some work for the worker thread.  Once we take kc->job_lock, we
guarantee that any written memory is visible to the other CPUs.
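In outline, every runtime path to the two page-pool functions runs on the
single-threaded kcopyd workqueue (client_alloc_pages runs only at
initialization, before the client is in use):

	do_work (single-threaded workqueue)
	  -> process_jobs
	       -> run_pages_job    -> kcopyd_get_pages
	       -> run_complete_job -> kcopyd_put_pages

so the two functions can never run concurrently and the lock protects nothing.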
Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-kcopyd.c | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 24fb42ed7b8..ed957791639 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -36,7 +36,6 @@
  * pages for kcopyd io.
 *---------------------------------------------------------------*/
 struct dm_kcopyd_client {
-	spinlock_t lock;
 	struct page_list *pages;
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
@@ -99,11 +98,8 @@ static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
 {
 	struct page_list *pl;
 
-	spin_lock(&kc->lock);
-	if (kc->nr_free_pages < nr) {
-		spin_unlock(&kc->lock);
+	if (kc->nr_free_pages < nr)
 		return -ENOMEM;
-	}
 
 	kc->nr_free_pages -= nr;
 	for (*pages = pl = kc->pages; --nr; pl = pl->next)
@@ -112,8 +108,6 @@ static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
 	kc->pages = pl->next;
 	pl->next = NULL;
 
-	spin_unlock(&kc->lock);
-
 	return 0;
 }
 
@@ -121,14 +115,12 @@ static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
 	struct page_list *cursor;
 
-	spin_lock(&kc->lock);
 	for (cursor = pl; cursor->next; cursor = cursor->next)
 		kc->nr_free_pages++;
 
 	kc->nr_free_pages++;
 
 	cursor->next = kc->pages;
 	kc->pages = pl;
-	spin_unlock(&kc->lock);
 }
 
 /*
@@ -625,7 +617,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	if (!kc)
 		return -ENOMEM;
 
-	spin_lock_init(&kc->lock);
 	spin_lock_init(&kc->job_lock);
 	INIT_LIST_HEAD(&kc->complete_jobs);
 	INIT_LIST_HEAD(&kc->io_jobs);
-- cgit v1.2.3

From f99b55eec795bd0fd577ab3ca06f3acfbe3b1ab1 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:03:04 +0100
Subject: dm kcopyd: add gfp parm to alloc_pl

Introduce a parameter for gfp flags to alloc_pl() for use in the
following patches.

Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-kcopyd.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index ed957791639..0270844c2a3 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -70,15 +70,15 @@ static void wake(struct dm_kcopyd_client *kc)
 	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
 }
 
-static struct page_list *alloc_pl(void)
+static struct page_list *alloc_pl(gfp_t gfp)
 {
 	struct page_list *pl;
 
-	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+	pl = kmalloc(sizeof(*pl), gfp);
 	if (!pl)
 		return NULL;
 
-	pl->page = alloc_page(GFP_KERNEL);
+	pl->page = alloc_page(gfp);
 	if (!pl->page) {
 		kfree(pl);
 		return NULL;
@@ -143,7 +143,7 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 	struct page_list *pl = NULL, *next;
 
 	for (i = 0; i < nr; i++) {
-		next = alloc_pl();
+		next = alloc_pl(GFP_KERNEL);
 		if (!next) {
 			if (pl)
 				drop_pages(pl);
-- cgit v1.2.3

From d04714580f12379fcf7a0f799e86c92b96dd4e1f Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:03:07 +0100
Subject: dm kcopyd: alloc pages from the main page allocator

This patch changes dm-kcopyd so that it allocates pages from the main
page allocator with the __GFP_NOWARN | __GFP_NORETRY flags (so that the
allocation can fail under memory pressure).  If the allocation fails,
dm-kcopyd falls back to pages from its own reserve.
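For reference, __GFP_NORETRY tells the page allocator to give up quickly
rather than retry under pressure, and __GFP_NOWARN suppresses the
allocation-failure warning, so a failed attempt is cheap and silent. A
simplified sketch of the strategy implemented in the hunk below:

	pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY); /* fails fast under pressure */
	if (!pl) {
		pl = kc->pages;			/* fall back to the reserve */
		if (!pl)
			goto out_of_memory;	/* reserve exhausted too */
		kc->pages = pl->next;
		kc->nr_free_pages--;
	}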
Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-kcopyd.c | 91 +++++++++++++++++++++++++++++++++-----------------
 1 file changed, 60 insertions(+), 31 deletions(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 0270844c2a3..5dfbdcb40a4 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,8 +37,8 @@
 *---------------------------------------------------------------*/
 struct dm_kcopyd_client {
 	struct page_list *pages;
-	unsigned int nr_pages;
-	unsigned int nr_free_pages;
+	unsigned nr_reserved_pages;
+	unsigned nr_free_pages;
 
 	struct dm_io_client *io_client;
 
@@ -70,6 +70,9 @@ static void wake(struct dm_kcopyd_client *kc)
 	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
 }
 
+/*
+ * Obtain one page for the use of kcopyd.
+ */
 static struct page_list *alloc_pl(gfp_t gfp)
 {
 	struct page_list *pl;
@@ -93,34 +96,56 @@ static void free_pl(struct page_list *pl)
 	kfree(pl);
 }
 
-static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
-			    unsigned int nr, struct page_list **pages)
+/*
+ * Add the provided pages to a client's free page list, releasing
+ * back to the system any beyond the reserved_pages limit.
+ */
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
-	struct page_list *pl;
-
-	if (kc->nr_free_pages < nr)
-		return -ENOMEM;
+	struct page_list *next;
 
-	kc->nr_free_pages -= nr;
-	for (*pages = pl = kc->pages; --nr; pl = pl->next)
-		;
+	do {
+		next = pl->next;
 
-	kc->pages = pl->next;
-	pl->next = NULL;
+		if (kc->nr_free_pages >= kc->nr_reserved_pages)
+			free_pl(pl);
+		else {
+			pl->next = kc->pages;
+			kc->pages = pl;
+			kc->nr_free_pages++;
+		}
 
-	return 0;
+		pl = next;
+	} while (pl);
 }
 
-static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
+			    unsigned int nr, struct page_list **pages)
 {
-	struct page_list *cursor;
+	struct page_list *pl;
 
-	for (cursor = pl; cursor->next; cursor = cursor->next)
-		kc->nr_free_pages++;
+	*pages = NULL;
+
+	do {
+		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+		if (unlikely(!pl)) {
+			/* Use reserved pages */
+			pl = kc->pages;
+			if (unlikely(!pl))
+				goto out_of_memory;
+			kc->pages = pl->next;
+			kc->nr_free_pages--;
+		}
+		pl->next = *pages;
+		*pages = pl;
+	} while (--nr);
+
+	return 0;
 
-	kc->nr_free_pages++;
-	cursor->next = kc->pages;
-	kc->pages = pl;
+out_of_memory:
+	if (*pages)
+		kcopyd_put_pages(kc, *pages);
+	return -ENOMEM;
 }
 
 /*
@@ -137,12 +162,15 @@ static void drop_pages(struct page_list *pl)
 	}
 }
 
-static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
+/*
+ * Allocate and reserve nr_pages for the use of a specific client.
+ */
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
 {
-	unsigned int i;
+	unsigned i;
 	struct page_list *pl = NULL, *next;
 
-	for (i = 0; i < nr; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		next = alloc_pl(GFP_KERNEL);
 		if (!next) {
 			if (pl)
 				drop_pages(pl);
@@ -153,17 +181,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 		pl = next;
 	}
 
+	kc->nr_reserved_pages += nr_pages;
 	kcopyd_put_pages(kc, pl);
-	kc->nr_pages += nr;
+
 	return 0;
 }
 
 static void client_free_pages(struct dm_kcopyd_client *kc)
 {
-	BUG_ON(kc->nr_free_pages != kc->nr_pages);
+	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
 	drop_pages(kc->pages);
 	kc->pages = NULL;
-	kc->nr_free_pages = kc->nr_pages = 0;
+	kc->nr_free_pages = kc->nr_reserved_pages = 0;
 }
 
 /*-----------------------------------------------------------------
@@ -607,7 +636,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 /*-----------------------------------------------------------------
  * Client setup
 *---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned int nr_pages,
+int dm_kcopyd_client_create(unsigned min_pages,
 			    struct dm_kcopyd_client **result)
 {
 	int r = -ENOMEM;
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 		goto bad_workqueue;
 
 	kc->pages = NULL;
-	kc->nr_pages = kc->nr_free_pages = 0;
-	r = client_alloc_pages(kc, nr_pages);
+	kc->nr_reserved_pages = kc->nr_free_pages = 0;
+	r = client_reserve_pages(kc, min_pages);
 	if (r)
 		goto bad_client_pages;
 
-	kc->io_client = dm_io_client_create(nr_pages);
+	kc->io_client = dm_io_client_create(min_pages);
 	if (IS_ERR(kc->io_client)) {
 		r = PTR_ERR(kc->io_client);
 		goto bad_io_client;
-- cgit v1.2.3

From bda8efec5c706a672e0714d341a342e811f0262a Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:03:09 +0100
Subject: dm io: use fixed initial mempool size

Replace the arbitrary calculation of an initial io struct mempool size
with a constant.  The code calculated the number of reserved structures
based on the request size and used a "magic" multiplication constant
of 4.  This patch changes it to reserve a fixed number - itself still
chosen quite arbitrarily.  Further testing might show if there is a
better number to choose.  (For example, mirror's 64-page io client
previously reserved 4 * 64 = 256 io structs; every client now reserves
MIN_IOS = 16.)

Note that if there is no memory pressure, we can still allocate an
arbitrary number of "struct io" structures.  One structure is enough to
process the whole request.

Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-io.c              | 27 +++++----------------------
 drivers/md/dm-kcopyd.c          |  2 +-
 drivers/md/dm-log.c             |  3 +--
 drivers/md/dm-raid1.c           |  3 +--
 drivers/md/dm-snap-persistent.c | 13 +------------
 include/linux/dm-io.h           |  3 +--
 6 files changed, 10 insertions(+), 41 deletions(-)

diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 76a5af00a26..2067288f61f 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
 
 #define DM_MSG_PREFIX "io"
 #define DM_IO_MAX_REGIONS	BITS_PER_LONG
+#define MIN_IOS		16
+#define MIN_BIOS	16
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -40,34 +42,22 @@ struct io {
 
 static struct kmem_cache *_dm_io_cache;
 
-/*
- * io contexts are only dynamically allocated for asynchronous
- * io.  Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
-	return 4 * pages;	/* too many ? */
-}
-
 /*
  * Create a client with mempool and bioset.
  */
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
 {
-	unsigned ios = pages_to_ios(num_pages);
 	struct dm_io_client *client;
 
 	client = kmalloc(sizeof(*client), GFP_KERNEL);
 	if (!client)
 		return ERR_PTR(-ENOMEM);
 
-	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(16, 0);
+	client->bios = bioset_create(MIN_BIOS, 0);
 	if (!client->bios)
 		goto bad;
 
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
 }
 EXPORT_SYMBOL(dm_io_client_create);
 
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
-	return mempool_resize(client->pool, pages_to_ios(num_pages),
-			      GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
 void dm_io_client_destroy(struct dm_io_client *client)
 {
 	mempool_destroy(client->pool);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 5dfbdcb40a4..719693340d1 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -667,7 +667,7 @@ int dm_kcopyd_client_create(unsigned min_pages,
 	if (r)
 		goto bad_client_pages;
 
-	kc->io_client = dm_io_client_create(min_pages);
+	kc->io_client = dm_io_client_create();
 	if (IS_ERR(kc->io_client)) {
 		r = PTR_ERR(kc->io_client);
 		goto bad_io_client;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a1f32188967..948e3f4925b 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 
 	lc->io_req.mem.type = DM_IO_VMA;
 	lc->io_req.notify.fn = NULL;
-	lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
-							   PAGE_SIZE));
+	lc->io_req.client = dm_io_client_create();
 	if (IS_ERR(lc->io_req.client)) {
 		r = PTR_ERR(lc->io_req.client);
 		DMWARN("couldn't allocate disk io client");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 976ad4688af..53089aa1138 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,7 +22,6 @@
 #define DM_MSG_PREFIX "raid1"
 
 #define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
 #define DM_KCOPYD_PAGES 64
 
 #define DM_RAID1_HANDLE_ERRORS 0x01
@@ -887,7 +886,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 		return NULL;
 	}
 
-	ms->io_client = dm_io_client_create(DM_IO_PAGES);
+	ms->io_client = dm_io_client_create();
 	if (IS_ERR(ms->io_client)) {
 		ti->error = "Error creating dm_io client";
 		mempool_destroy(ms->read_record_pool);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 95891dfcbca..135c2f1fdbf 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
 	struct workqueue_struct *metadata_wq;
 };
 
-static unsigned sectors_to_pages(unsigned sectors)
-{
-	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
-}
-
 static int alloc_area(struct pstore *ps)
 {
 	int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		chunk_size_supplied = 0;
 	}
 
-	ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
-							     chunk_size));
+	ps->io_client = dm_io_client_create();
 	if (IS_ERR(ps->io_client))
 		return PTR_ERR(ps->io_client);
 
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		return r;
 	}
 
-	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
-				ps->io_client);
-	if (r)
-		return r;
-
 	r = alloc_area(ps);
 
 	return r;
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index 5c9186b93ff..f4b0aa3126f 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -69,8 +69,7 @@ struct dm_io_request {
  *
  * Create/destroy may block.
  */
-struct dm_io_client *dm_io_client_create(unsigned num_pages);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
+struct dm_io_client *dm_io_client_create(void);
 void dm_io_client_destroy(struct dm_io_client *client);
 
 /*
-- cgit v1.2.3

From 5f43ba2950414dc0abf4ac44c397d88069056746 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:03:11 +0100
Subject: dm kcopyd: reserve fewer pages

Reserve just the minimum number of pages needed to process one job.

Because we now allocate pages from the main page allocator, we don't
need to reserve a large number of pages.  The maximum job size is
SUB_JOB_SIZE and we calculate the number of reserved pages based on
this.
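A worked example (assuming 512-byte sectors and the common 4 KiB PAGE_SIZE;
both values are assumptions of this illustration):

	RESERVE_PAGES = DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE)
	              = DIV_ROUND_UP(128 * 512, 4096)
	              = 16 pages per client

compared with the 64 pages (DM_KCOPYD_PAGES) mirror reserved and the 1MB
(SNAPSHOT_PAGES, i.e. 256 pages) each snapshot reserved before this patch.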
Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-kcopyd.c    | 6 +++---
 drivers/md/dm-raid1.c     | 3 +--
 drivers/md/dm-snap.c      | 7 +------
 include/linux/dm-kcopyd.h | 3 +--
 4 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 719693340d1..579647f8b4d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -30,6 +30,7 @@
 #define SUB_JOB_SIZE	128
 #define SPLIT_COUNT	8
 #define MIN_JOBS	8
+#define RESERVE_PAGES	(DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
@@ -636,8 +637,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 /*-----------------------------------------------------------------
  * Client setup
 *---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned min_pages,
-			    struct dm_kcopyd_client **result)
+int dm_kcopyd_client_create(struct dm_kcopyd_client **result)
 {
 	int r = -ENOMEM;
 	struct dm_kcopyd_client *kc;
@@ -663,7 +663,7 @@ int dm_kcopyd_client_create(unsigned min_pages,
 
 	kc->pages = NULL;
 	kc->nr_reserved_pages = kc->nr_free_pages = 0;
-	r = client_reserve_pages(kc, min_pages);
+	r = client_reserve_pages(kc, RESERVE_PAGES);
 	if (r)
 		goto bad_client_pages;
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 53089aa1138..9defad04541 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,7 +22,6 @@
 #define DM_MSG_PREFIX "raid1"
 
 #define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
-#define DM_KCOPYD_PAGES 64
 
 #define DM_RAID1_HANDLE_ERRORS 0x01
 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -1116,7 +1115,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_destroy_wq;
 	}
 
-	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
+	r = dm_kcopyd_client_create(&ms->kcopyd_client);
 	if (r)
 		goto err_destroy_wq;
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2d330942cb..5a2296de84a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -39,11 +39,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
  */
 #define SNAPSHOT_COPY_PRIORITY 2
 
-/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
 /*
  * The size of the mempool used to track chunks in use.
  */
@@ -1116,7 +1111,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_hash_tables;
 	}
 
-	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
+	r = dm_kcopyd_client_create(&s->kcopyd_client);
 	if (r) {
 		ti->error = "Could not create kcopyd client";
 		goto bad_kcopyd;
 	}
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index 5db21631169..312513d4741 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -25,8 +25,7 @@
 * To use kcopyd you must first create a dm_kcopyd_client object.
 */
 struct dm_kcopyd_client;
-int dm_kcopyd_client_create(unsigned num_pages,
-			    struct dm_kcopyd_client **result);
+int dm_kcopyd_client_create(struct dm_kcopyd_client **result);
 void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
 
 /*
-- cgit v1.2.3

From fa34ce73072f90ecd90dcc43f29d82e70e5f8676 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 29 May 2011 13:03:13 +0100
Subject: dm kcopyd: return client directly and not through a pointer

Return the client directly from dm_kcopyd_client_create, not through a
parameter, making it consistent with dm_io_client_create.

Signed-off-by: Mikulas Patocka
Signed-off-by: Alasdair G Kergon
---
 drivers/md/dm-kcopyd.c    | 9 ++++-----
 drivers/md/dm-raid1.c     | 6 ++++--
 drivers/md/dm-snap.c      | 5 +++--
 include/linux/dm-kcopyd.h | 2 +-
 4 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 579647f8b4d..819e37eaaeb 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -637,14 +637,14 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 /*-----------------------------------------------------------------
  * Client setup
 *---------------------------------------------------------------*/
-int dm_kcopyd_client_create(struct dm_kcopyd_client **result)
+struct dm_kcopyd_client *dm_kcopyd_client_create(void)
 {
 	int r = -ENOMEM;
 	struct dm_kcopyd_client *kc;
 
 	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
 	if (!kc)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&kc->job_lock);
 	INIT_LIST_HEAD(&kc->complete_jobs);
@@ -676,8 +676,7 @@ int dm_kcopyd_client_create(struct dm_kcopyd_client **result)
 	init_waitqueue_head(&kc->destroyq);
 	atomic_set(&kc->nr_jobs, 0);
 
-	*result = kc;
-	return 0;
+	return kc;
 
 bad_io_client:
 	client_free_pages(kc);
@@ -688,7 +687,7 @@ bad_workqueue:
 bad_slab:
 	kfree(kc);
 
-	return r;
+	return ERR_PTR(r);
 }
 EXPORT_SYMBOL(dm_kcopyd_client_create);
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9defad04541..9bfd057be68 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1115,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_destroy_wq;
 	}
 
-	r = dm_kcopyd_client_create(&ms->kcopyd_client);
-	if (r)
+	ms->kcopyd_client = dm_kcopyd_client_create();
+	if (IS_ERR(ms->kcopyd_client)) {
+		r = PTR_ERR(ms->kcopyd_client);
 		goto err_destroy_wq;
+	}
 
 	wakeup_mirrord(ms);
 	return 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5a2296de84a..9ecff5f3023 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1111,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_hash_tables;
 	}
 
-	r = dm_kcopyd_client_create(&s->kcopyd_client);
-	if (r) {
+	s->kcopyd_client = dm_kcopyd_client_create();
+	if (IS_ERR(s->kcopyd_client)) {
+		r = PTR_ERR(s->kcopyd_client);
 		ti->error = "Could not create kcopyd client";
 		goto bad_kcopyd;
 	}
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index 312513d4741..298d587e349 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -25,7 +25,7 @@
 * To use kcopyd you must first create a dm_kcopyd_client object.
 */
 struct dm_kcopyd_client;
-int dm_kcopyd_client_create(struct dm_kcopyd_client **result);
+struct dm_kcopyd_client *dm_kcopyd_client_create(void);
 void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
 
 /*
-- cgit v1.2.3
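A minimal caller sketch of the resulting convention (illustrative only; it
mirrors the dm-raid1 and dm-snap hunks above, and struct example_ctx is
hypothetical):

	static int example_init(struct example_ctx *ctx)
	{
		ctx->kcopyd_client = dm_kcopyd_client_create();
		if (IS_ERR(ctx->kcopyd_client))
			return PTR_ERR(ctx->kcopyd_client);

		return 0;
	}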