author		Thierry Reding <treding@nvidia.com>	2019-02-01 14:28:27 +0100
committer	Thierry Reding <treding@nvidia.com>	2019-02-07 18:28:57 +0100
commit		38fabcc953883741313d707a919326f77c2d7214
tree		c05dab3f17eb6ab2a05cdb3645ed6d99ff9703f4 /drivers/gpu/host1x
parent		67a82dbc0a374df7a348cc8fb28982945035bd25
gpu: host1x: Restrict IOVA space to DMA mask
On Tegra186 and later, the ARM SMMU provides an input address space that
is 48 bits wide. However, memory clients can only address up to 40 bits.
If the geometry is used as-is, allocations of IOVA space can end up in a
region that is not addressable by the memory clients.

To fix this, restrict the IOVA space to the DMA mask of the host1x
device.

Signed-off-by: Thierry Reding <treding@nvidia.com>
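For illustration only, and not part of the patch: a minimal standalone
sketch of the masking arithmetic, assuming the 40-bit client addressing
and 48-bit SMMU input space described above. The mask and aperture
values are made-up constants, not values read from hardware.

/*
 * Sketch: clamping a 48-bit aperture to a 40-bit DMA mask. The
 * constants mirror the commit description; DMA_BIT_MASK(40) in the
 * kernel expands to ((1ULL << 40) - 1).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ULL << 40) - 1;         /* assumed DMA mask   */
	uint64_t aperture_start = 0;              /* assumed SMMU start */
	uint64_t aperture_end = (1ULL << 48) - 1; /* 48-bit input space */

	/* Mask both aperture bounds so that every IOVA handed out later
	 * stays addressable by the 40-bit memory clients.
	 */
	uint64_t start = aperture_start & mask;
	uint64_t end = aperture_end & mask;

	printf("start: %#llx\n", (unsigned long long)start);
	printf("end:   %#llx\n", (unsigned long long)end); /* 0xffffffffff */
	return 0;
}

With these assumed values, the usable range shrinks from 2^48 bytes to
2^40 bytes, matching what the memory clients can actually address.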
Diffstat (limited to 'drivers/gpu/host1x')
-rw-r--r--	drivers/gpu/host1x/dev.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 4c044ee54fe6..544b67f2b3ff 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -283,6 +283,8 @@ static int host1x_probe(struct platform_device *pdev)
 	host->group = iommu_group_get(&pdev->dev);
 	if (host->group) {
 		struct iommu_domain_geometry *geometry;
+		u64 mask = dma_get_mask(host->dev);
+		dma_addr_t start, end;
 		unsigned long order;
 
 		err = iova_cache_get();
@@ -310,11 +312,12 @@ static int host1x_probe(struct platform_device *pdev)
 		}
 
 		geometry = &host->domain->geometry;
+		start = geometry->aperture_start & mask;
+		end = geometry->aperture_end & mask;
 
 		order = __ffs(host->domain->pgsize_bitmap);
-		init_iova_domain(&host->iova, 1UL << order,
-				 geometry->aperture_start >> order);
-		host->iova_end = geometry->aperture_end;
+		init_iova_domain(&host->iova, 1UL << order, start >> order);
+		host->iova_end = end;
 	}
 
 skip_iommu:
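A side note on the unchanged context line above: __ffs() returns the
index of the lowest set bit, so __ffs(pgsize_bitmap) yields the order of
the smallest IOMMU page size the domain supports; init_iova_domain()
then uses 1UL << order as the allocation granule and start >> order as
the starting page frame. A user-space sketch with an assumed page-size
bitmap (4 KiB, 2 MiB and 1 GiB pages, typical for an ARM SMMU with a
4 KiB granule):

/*
 * Sketch: deriving the IOVA granule from an assumed pgsize_bitmap.
 * __builtin_ffsll() counts from 1, so subtract 1 to match the
 * kernel's __ffs() semantics.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long pgsize_bitmap =
		(1ULL << 12) | (1ULL << 21) | (1ULL << 30); /* assumed */
	unsigned int order = __builtin_ffsll(pgsize_bitmap) - 1;

	printf("order: %u, granule: %llu bytes\n", order, 1ULL << order);
	return 0;
}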