author     Rabin Vincent <rabin.vincent@stericsson.com>    2011-01-25 11:18:33 +0100
committer  Rabin VINCENT <rabin.vincent@stericsson.com>    2011-09-22 10:13:18 +0200
commit     e2d441872442cbb1a7319304654cf14aaa335dea (patch)
tree       b878da72aba16e8e3a1fb1b251c399c7111ef5d6
parent     cfbb762e2bfba84817786dd062bac85772183320 (diff)
dma40: fix DMA_SG capability and channels
The DMA_SG cap is enabled on the wrong channel, and the pointers are
repeatedly set incorrectly. Fix it and combine the ops settings to a
common function.

Change-Id: I93644620f54c8773405a54dbcfc7f2c59af73d00
Acked-by: Per Forlin <per.forlin@stericsson.com>
Acked-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/31683
Tested-by: Narayanan GOPALAKRISHNAN <narayanan.gopalakrishnan@stericsson.com>
-rw-r--r--  drivers/dma/ste_dma40.c  |  66
1 file changed, 32 insertions, 34 deletions
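
For context: the capability bits touched by this patch (DMA_SLAVE, DMA_MEMCPY, DMA_CYCLIC, and the misplaced DMA_SG) are what the dmaengine core matches against a client's request mask when handing out channels, so the cap_mask on each exported dma_device has to reflect what that device actually implements. Below is a minimal, hypothetical client-side sketch, assuming only the standard dma_request_channel() API from <linux/dmaengine.h>; the function name my_get_slave_chan is illustrative and not part of the patch:

#include <linux/dmaengine.h>

/*
 * Hypothetical client (not part of this patch): ask the dmaengine core
 * for any channel whose dma_device advertises the DMA_SLAVE capability.
 * The core compares this mask against each device's cap_mask, which is
 * exactly what the dma_cap_set() calls in the diff below configure.
 */
static struct dma_chan *my_get_slave_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* NULL filter function: accept the first matching channel. */
	return dma_request_channel(mask, NULL, NULL);
}
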
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index a640555d87b..bc6f5a44bc9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3076,6 +3076,32 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
}
}
+static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
+{
+ if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
+ dev->device_prep_slave_sg = d40_prep_slave_sg;
+
+ if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
+ dev->device_prep_dma_memcpy = d40_prep_memcpy;
+
+ /*
+ * This controller can only access address at even
+ * 32bit boundaries, i.e. 2^2
+ */
+ dev->copy_align = 2;
+ }
+
+ if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
+ dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
+
+ dev->device_alloc_chan_resources = d40_alloc_chan_resources;
+ dev->device_free_chan_resources = d40_free_chan_resources;
+ dev->device_issue_pending = d40_issue_pending;
+ dev->device_tx_status = d40_tx_status;
+ dev->device_control = d40_control;
+ dev->dev = base->dev;
+}
+
static int __init d40_dmaengine_init(struct d40_base *base,
int num_reserved_chans)
{
@@ -3086,16 +3112,9 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
- base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
- base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_slave.device_prep_dma_cyclic = dma40_prep_dma_cyclic;
- base->dma_slave.device_tx_status = d40_tx_status;
- base->dma_slave.device_control = d40_control;
- base->dma_slave.device_issue_pending = d40_issue_pending;
- base->dma_slave.dev = base->dev;
+ d40_ops_init(base, &base->dma_slave);
err = dma_async_device_register(&base->dma_slave);
@@ -3112,20 +3131,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
- base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
- base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_memcpy.device_prep_dma_cyclic = dma40_prep_dma_cyclic;
- base->dma_memcpy.device_tx_status = d40_tx_status;
- base->dma_memcpy.device_control = d40_control;
- base->dma_memcpy.device_issue_pending = d40_issue_pending;
- base->dma_memcpy.dev = base->dev;
- /*
- * This controller can only access address at even
- * 32bit boundaries, i.e. 2^2
- */
- base->dma_memcpy.copy_align = 2;
+ d40_ops_init(base, &base->dma_memcpy);
err = dma_async_device_register(&base->dma_memcpy);
@@ -3142,18 +3148,10 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_zero(base->dma_both.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+ dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
+
+ d40_ops_init(base, &base->dma_both);
- base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
- base->dma_both.device_free_chan_resources = d40_free_chan_resources;
- base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
- base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
- base->dma_both.device_prep_dma_cyclic = dma40_prep_dma_cyclic;
- base->dma_both.device_tx_status = d40_tx_status;
- base->dma_both.device_control = d40_control;
- base->dma_both.device_issue_pending = d40_issue_pending;
-
- base->dma_both.dev = base->dev;
- base->dma_both.copy_align = 2;
err = dma_async_device_register(&base->dma_both);
if (err) {