author    Robin Murphy <Robin.Murphy@arm.com>  2015-07-17 16:58:21 +0100
committer Will Deacon <will.deacon@arm.com>    2015-07-27 11:08:40 +0100
commit    1d1ddf67dc3bfd80f60b216fa1fedfb242bee299 (patch)
tree      e62ad9b0949e30bc058c8aea75ce11dcb38e72da /arch/arm64/mm/dma-mapping.c
parent    4b3dc9679cf779339d9049800803dfc3c83433d1 (diff)
arm64: dma-mapping: implement dma_get_sgtable()
The default dma_common_get_sgtable() implementation relies on the CPU
address of the buffer being a regular lowmem address. This is not always
the case on arm64, since allocations from the various DMA pools may have
remapped vmalloc addresses, rendering the use of virt_to_page() invalid.

Fix this by providing our own implementation based on the fact that we
can safely derive a physical address from the DMA address in both cases.

Cc: Jon Medhurst <tixy@linaro.org>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[will: made static]
Signed-off-by: Will Deacon <will.deacon@arm.com>
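For contrast, the generic helper this patch overrides looked roughly like the
following at the time (a paraphrased sketch of dma_common_get_sgtable() from
drivers/base/dma-mapping.c of the same era, not a verbatim copy):

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t handle, size_t size)
{
	/* Assumes cpu_addr lies in the kernel's linear (lowmem) mapping. */
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

virt_to_page() is only meaningful for addresses in the linear mapping; when
the buffer came from a remapped DMA pool, cpu_addr is a vmalloc address and
the computed struct page pointer is garbage. Deriving the page from the DMA
handle via dma_to_phys() sidesteps this, since the handle maps back to a
valid physical address in both cases.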
Diffstat (limited to 'arch/arm64/mm/dma-mapping.c')
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 63b2a117a03c..e5d74cdfdb71 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -331,10 +331,24 @@ static int __swiotlb_mmap(struct device *dev,
 	return ret;
 }
 
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t handle, size_t size,
+				 struct dma_attrs *attrs)
+{
+	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+
+	if (!ret)
+		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
+			    PAGE_ALIGN(size), 0);
+
+	return ret;
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
+	.get_sgtable = __swiotlb_get_sgtable,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
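For a sense of how this path is exercised: dma_get_sgtable() is typically
called by drivers that export a coherent buffer to another subsystem, dma-buf
exporters being the common case. A minimal, hypothetical sketch follows;
example_export() and its error handling are invented for illustration, while
the DMA API calls themselves are real:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: allocate a coherent buffer and describe it as a
 * single-entry sg_table, e.g. for a dma-buf map_dma_buf callback. */
static int example_export(struct device *dev, size_t size)
{
	struct sg_table sgt;
	dma_addr_t handle;
	void *cpu_addr;
	int ret;

	cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* On arm64 this now reaches __swiotlb_get_sgtable(), which derives
	 * the page from the DMA handle rather than from cpu_addr, so it
	 * works even when cpu_addr is a remapped vmalloc address. */
	ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
	if (ret)
		goto free;

	/* ... hand sgt.sgl to the importer ... */

	sg_free_table(&sgt);
free:
	dma_free_coherent(dev, size, cpu_addr, handle);
	return ret;
}

Note the single-entry table: a coherent allocation is physically contiguous,
so one scatterlist entry covering the whole buffer suffices.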