author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-06 11:16:11 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-06 11:16:11 -0800
commit    3d4d4582e5b3f67a68f2cf32fd5b70d8d80f119d (patch)
tree      18d270847537d1a9d1a396d03e585654130630db /drivers
parent    8f1bfa4c5c093e97154be4ec969bdf7190aeff6a (diff)
parent    47437b2c9a64315efeb3d84e97ffefd6c3c67ef1 (diff)
Merge branch 'async-tx-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop into fix

* 'async-tx-for-linus' of git://lost.foo-projects.org/~dwillia2/git/iop:
  async_tx: allow architecture specific async_tx_find_channel implementations
  async_tx: replace 'int_en' with operation preparation flags
  async_tx: kill tx_set_src and tx_set_dest methods
  async_tx: kill ASYNC_TX_ASSUME_COHERENT
  iop-adma: use LIST_HEAD instead of LIST_HEAD_INIT
  async_tx: use LIST_HEAD instead of LIST_HEAD_INIT
  async_tx: fix compile breakage, mark do_async_xor __always_inline
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/dma/Kconfig      |    1
 -rw-r--r--  drivers/dma/dmaengine.c  |   49
 -rw-r--r--  drivers/dma/ioat_dma.c   |   43
 -rw-r--r--  drivers/dma/iop-adma.c   |  138
 4 files changed, 96 insertions(+), 135 deletions(-)
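The thread running through this series is a change to the dmaengine prep-routine interface: the caller now creates the DMA mappings and passes bus addresses straight into device_prep_dma_memcpy(), the trailing 'int_en' argument becomes an 'unsigned long flags' word, and the per-descriptor tx_set_src()/tx_set_dest() hooks go away. A minimal client-side sketch of the resulting pattern, distilled from the dma_async_memcpy_buf_to_buf() hunk below (the helper name copy_with_new_api is hypothetical, not part of the patch):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative helper: map first, hand bus addresses to the prep
 * routine, and unmap again if prep fails. */
static dma_cookie_t copy_with_new_api(struct dma_chan *chan,
				      void *dest, void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* the trailing argument is now a flags word, not 'int_en' */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->ack = 1;
	tx->callback = NULL;
	return tx->tx_submit(tx);
}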
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c46b7c219ee..a703deffb79 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -5,6 +5,7 @@
menuconfig DMADEVICES
bool "DMA Engine support"
depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+ depends on !HIGHMEM64G
help
DMA engines can do asynchronous data transfers without
involving the host CPU. Currently, this framework can be
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df3033..29965231b91 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+ DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
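Note that the exported client entry points keep their prototypes; only their bodies change to perform the mapping internally, so dmaengine clients need no source changes. A brief usage sketch (buffer names hypothetical):

	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst_buf, src_buf, len);
	if (cookie < 0)
		pr_err("memcpy offload prep failed: %d\n", cookie);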
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c7..dff38accc5c 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
return device->common.chancnt;
}
-static void ioat_set_src(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- tx_to_ioat_desc(tx)->dst = addr;
-}
-
/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
* descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
memset(desc, 0, sizeof(*desc));
dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
- desc_sw->async_tx.tx_set_src = ioat_set_src;
- desc_sw->async_tx.tx_set_dest = ioat_set_dest;
switch (ioat_chan->device->version) {
case IOAT_VER_1_2:
desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,8 +698,10 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
struct dma_chan *chan,
+ dma_addr_t dma_dest,
+ dma_addr_t dma_src,
size_t len,
- int int_en)
+ unsigned long flags)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *new;
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
if (new) {
new->len = len;
+ new->dst = dma_dest;
+ new->src = dma_src;
return &new->async_tx;
} else
return NULL;
@@ -733,8 +721,10 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
struct dma_chan *chan,
+ dma_addr_t dma_dest,
+ dma_addr_t dma_src,
size_t len,
- int int_en)
+ unsigned long flags)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *new;
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
if (new) {
new->len = len;
+ new->dst = dma_dest;
+ new->src = dma_src;
return &new->async_tx;
} else
return NULL;
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
u8 *dest;
struct dma_chan *dma_chan;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int err = 0;
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
goto out;
}
- tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+ dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+ DMA_TO_DEVICE);
+ dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+ DMA_FROM_DEVICE);
+ tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+ IOAT_TEST_SIZE, 0);
if (!tx) {
dev_err(&device->pdev->dev,
"Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
}
async_tx_ack(tx);
- addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
- DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
- DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
tx->callback = ioat_dma_test_callback;
tx->callback_param = (void *)0x8086;
cookie = tx->tx_submit(tx);
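On the provider side, a prep routine under the new signature receives both bus addresses up front and records them on the software descriptor, which is what lets the tx_set_src()/tx_set_dest() methods above be deleted. A condensed sketch of the shape such a routine takes (struct example_desc and example_get_descriptor() are hypothetical stand-ins for driver internals such as struct ioat_desc_sw and ioat_dma_get_next_descriptor()):

/* hypothetical software descriptor, modeled on struct ioat_desc_sw */
struct example_desc {
	struct dma_async_tx_descriptor async_tx;
	dma_addr_t src;
	dma_addr_t dst;
	size_t len;
};

static struct dma_async_tx_descriptor *
example_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			dma_addr_t dma_src, size_t len,
			unsigned long flags)
{
	struct example_desc *new = example_get_descriptor(chan, len);

	if (!new)
		return NULL;

	/* addresses are captured at prep time, not via callbacks */
	new->dst = dma_dest;
	new->src = dma_src;
	new->len = len;
	return &new->async_tx;
}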
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e5c62b75f36..3986d54492b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -284,7 +284,7 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
int slots_per_op)
{
struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
- struct list_head chain = LIST_HEAD_INIT(chain);
+ LIST_HEAD(chain);
int slots_found, retry = 0;
/* start search from the last allocated descrtiptor
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
return cookie;
}
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
- /* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
- iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = iop_adma_tx_submit;
- slot->async_tx.tx_set_dest = iop_adma_set_dest;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -576,11 +555,12 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_memcpy(grp_start, int_en);
+ iop_desc_init_memcpy(grp_start, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+ iop_desc_set_memcpy_src_addr(grp_start, dma_src);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
}
spin_unlock_bh(&iop_chan->lock);
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
}
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
- int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+ int value, size_t len, unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -607,9 +587,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_memset(grp_start, int_en);
+ iop_desc_init_memset(grp_start, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_block_fill_val(grp_start, value);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
}
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
- int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+ unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -641,39 +613,32 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u int_en: %d\n",
- __FUNCTION__, src_cnt, len, int_en);
+ "%s src_cnt: %d len: %u flags: %lx\n",
+ __FUNCTION__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_xor(grp_start, src_cnt, int_en);
+ iop_desc_init_xor(grp_start, src_cnt, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+ while (src_cnt--)
+ iop_desc_set_xor_src_addr(grp_start, src_cnt,
+ dma_src[src_cnt]);
}
spin_unlock_bh(&iop_chan->lock);
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
- size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+ unsigned int src_cnt, size_t len, u32 *result,
+ unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -690,14 +655,16 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+ iop_desc_init_zero_sum(grp_start, src_cnt, flags);
iop_desc_set_zero_sum_byte_count(grp_start, len);
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
__FUNCTION__, grp_start->xor_check_result);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+ while (src_cnt--)
+ iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+ dma_src[src_cnt]);
}
spin_unlock_bh(&iop_chan->lock);
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
goto out;
}
- tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
dest_dma = dma_map_single(dma_chan->device->dev, dest,
IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dest_dma, tx, 0);
src_dma = dma_map_single(dma_chan->device->dev, src,
IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
- iop_adma_memcpy_set_src(src_dma, tx, 0);
+ tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+ IOP_ADMA_TEST_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
struct page *dest;
struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+ dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
dma_addr_t dma_addr, dest_dma;
struct dma_async_tx_descriptor *tx;
struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
}
/* test xor */
- tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
- PAGE_SIZE, 1);
dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dest_dma, tx, 0);
-
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
- PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
zero_sum_result = 1;
- tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
- PAGE_SIZE, &zero_sum_result, 1);
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+ zero_sum_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
}
/* test memset */
- tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dma_addr, tx, 0);
+ tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
/* test for non-zero parity sum */
zero_sum_result = 0;
- tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
- PAGE_SIZE, &zero_sum_result, 1);
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+ zero_sum_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
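The multi-source operations follow the same convention with an array of sources: the self-tests above now map every source page before calling prep, and the prep routine walks the array itself instead of relying on per-index tx_set_src() calls. A sketch of that loop (the example_* identifiers are hypothetical; compare iop_adma_prep_dma_xor() above):

/* hypothetical descriptor with an embedded source array */
struct example_xor_desc {
	struct dma_async_tx_descriptor async_tx;
	dma_addr_t dest;
	dma_addr_t src[8];
};

static struct dma_async_tx_descriptor *
example_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		     dma_addr_t *dma_src, unsigned int src_cnt,
		     size_t len, unsigned long flags)
{
	struct example_xor_desc *desc =
		example_get_xor_descriptor(chan, src_cnt, len);

	if (!desc)
		return NULL;

	desc->dest = dma_dest;
	/* replaces the old tx_set_src(addr, tx, index) callbacks */
	while (src_cnt--)
		desc->src[src_cnt] = dma_src[src_cnt];

	return &desc->async_tx;
}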