diff options
| author | Hari Kanigeri <h-kanigeri2@ti.com> | 2011-05-31 09:24:36 +0100 |
|---|---|---|
| committer | Andy Green <andy.green@linaro.org> | 2011-05-31 11:06:06 +0100 |
| commit | 0eca056750a13de1ea4cd5c3923ec011b3581e36 (patch) | |
| tree | e5f7dfe08654c7621d50b35de2e501a52987c33e /arch | |
| parent | 2ffbd459a42235067a33d125734a3a698d51f954 (diff) | |
omap:iommu-dmm fixes
This fixes the following:
1. pgd and pte entries weren't getting flushed out, leading to MMU faults.
2. Cache invalidate was passing the wrong size parameter to
memory_regain_ownership, causing the cache invalidate function to fail.
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/arm/plat-omap/iodmm.c | 18 | ||||
| -rw-r--r-- | arch/arm/plat-omap/iommu.c | 12 | ||||
| -rw-r--r-- | arch/arm/plat-omap/iovmm.c | 33 |
3 files changed, 48 insertions, 15 deletions
diff --git a/arch/arm/plat-omap/iodmm.c b/arch/arm/plat-omap/iodmm.c index d16743093b1..315f1570be4 100644 --- a/arch/arm/plat-omap/iodmm.c +++ b/arch/arm/plat-omap/iodmm.c @@ -394,7 +394,7 @@ int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size, goto err_out; } - if (memory_regain_ownership(dev, map_obj, (u32) pva, ul_size, dir)) { + if (memory_regain_ownership(dev, map_obj, va_align, ul_size, dir)) { pr_err("%s: InValid address parameters %p %x\n", __func__, pva, ul_size); status = -EFAULT; @@ -456,6 +456,7 @@ int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size, u32 pa; unsigned int pages; struct iotlb_entry tlb_entry; + struct page *mapped_page; if (!size || !usr_pgs) return -EINVAL; @@ -480,27 +481,28 @@ int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size, for (pg_i = 0; pg_i < pages; pg_i++) { pg_num = get_user_pages(current, mm, uva, 1, - w, 1, usr_pgs, NULL); + w, 1, &mapped_page, NULL); if (pg_num > 0) { - if (page_count(*usr_pgs) < 1) { + if (page_count(mapped_page) < 1) { pr_err("Bad page count after doing" "get_user_pages on" "user buffer\n"); break; } tlb_entry.pgsz = MMU_CAM_PGSZ_4K; - tlb_entry.prsvd = MMU_CAM_P; + tlb_entry.prsvd = 0; tlb_entry.valid = MMU_CAM_V; - tlb_entry.elsz = MMU_RAM_ELSZ_8; + tlb_entry.elsz = MMU_RAM_ELSZ_32; tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE; - tlb_entry.mixed = 0; + tlb_entry.mixed = MMU_RAM_MIXED; tlb_entry.da = da; - pa = page_to_phys(*usr_pgs); + pa = page_to_phys(mapped_page); tlb_entry.pa = (u32)pa; iopgtable_store_entry(mmu, &tlb_entry); + if (usr_pgs) + usr_pgs[pg_i] = mapped_page; da += PAGE_SIZE; uva += PAGE_SIZE; - usr_pgs++; } else { pr_err("get_user_pages FAILED," "MPU addr = 0x%x," diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c index de5283e30e9..b23a1806262 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/arch/arm/plat-omap/iommu.c @@ -470,7 +470,7 @@ static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da) return ERR_PTR(-ENOMEM); 
*iopgd = virt_to_phys(iopte) | IOPGD_TABLE; - flush_iopgd_range(iopgd, iopgd); + flush_iopgd_range(iopgd, iopgd + 1); dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte); } else { @@ -499,7 +499,7 @@ static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot) } *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION; - flush_iopgd_range(iopgd, iopgd); + flush_iopgd_range(iopgd, iopgd + 1); return 0; } @@ -516,7 +516,7 @@ static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot) for (i = 0; i < 16; i++) *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER; - flush_iopgd_range(iopgd, iopgd + 15); + flush_iopgd_range(iopgd, iopgd + 16); return 0; } @@ -529,7 +529,7 @@ static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot) return PTR_ERR(iopte); *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL; - flush_iopte_range(iopte, iopte); + flush_iopte_range(iopte, iopte + 1); dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n", __func__, da, pa, iopte, *iopte); @@ -554,7 +554,7 @@ static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot) for (i = 0; i < 16; i++) *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE; - flush_iopte_range(iopte, iopte + 15); + flush_iopte_range(iopte, iopte + 16); return 0; } @@ -725,7 +725,7 @@ void iopgtable_clear_entry_all(struct iommu *obj) iopte_free(iopte_offset(iopgd, 0)); *iopgd = 0; - flush_iopgd_range(iopgd, iopgd); + flush_iopgd_range(iopgd, iopgd + 1); } flush_iotlb_all(obj); diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index 5afe7bc89e3..ddf1f8a37ec 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c @@ -98,6 +98,24 @@ err_out: return -ENOMEM; } +static int omap_delete_vmm_pool(struct iodmm_struct *obj, int pool_id) +{ + struct iovmm_pool *pool; + struct iovmm_device *iovmm_obj = obj->iovmm; + struct list_head *_pool, *_next_pool; + + list_for_each_safe(_pool, _next_pool, &iovmm_obj->mmap_pool) { + pool = 
list_entry(_pool, struct iovmm_pool, list); + if (pool->pool_id == pool_id) { + gen_pool_destroy(pool->genpool); + list_del(&pool->list); + kfree(pool); + return 0; + } + } + return -ENODEV; +} + static int omap_iovmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args) { @@ -289,8 +307,21 @@ static int omap_iovmm_ioctl(struct inode *inode, struct file *filp, ret = status; break; } - case IOVMM_IOCDATOPA: case IOVMM_IOCDELETEPOOL: + { + int pool_id; + int size; + + size = copy_from_user(&pool_id, (void __user *)args, + sizeof(int)); + if (size) { + ret = -EINVAL; + goto err_user_buf; + } + ret = omap_delete_vmm_pool(obj, pool_id); + break; + } + case IOVMM_IOCDATOPA: default: return -ENOTTY; } |
