diff options
author | Hari Kanigeri <h-kanigeri2@ti.com> | 2011-05-31 09:24:37 +0100 |
---|---|---|
committer | Andy Green <andy.green@linaro.org> | 2011-05-31 11:06:07 +0100 |
commit | b2ec4a174d37c814675b8a792f19025f6c8b5d71 (patch) | |
tree | db4f482e8452cb6a0fc588f986b26cae666e4f6b /arch | |
parent | eb0e5198252f006ce0b31085a4bc2af0d833f166 (diff) |
omap:iommu-enable user to va support
Enable support for mapping buffers that are passed in through
mmap.
Also comment out the vmm delete pool function as this is broken.
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/plat-omap/iodmm.c | 46 | ||||
-rw-r--r-- | arch/arm/plat-omap/iovmm.c | 7 |
2 files changed, 53 insertions, 0 deletions
diff --git a/arch/arm/plat-omap/iodmm.c b/arch/arm/plat-omap/iodmm.c index 315f1570be4..ec7843e0bc6 100644 --- a/arch/arm/plat-omap/iodmm.c +++ b/arch/arm/plat-omap/iodmm.c @@ -635,6 +635,33 @@ int user_to_device_unmap(struct iommu *mmu, u32 da, unsigned size) return 0; } +/* + * ======== user_va2_pa ======== + * Purpose: + * This function walks through the page tables to convert a userland + * virtual address to physical address + */ +static u32 user_va2_pa(struct mm_struct *mm, u32 address) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *ptep, pte; + + pgd = pgd_offset(mm, address); + if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { + pmd = pmd_offset(pgd, address); + if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { + ptep = pte_offset_map(pmd, address); + if (ptep) { + pte = *ptep; + if (pte_present(pte)) + return pte & PAGE_MASK; + } + } + } + return 0; +} + int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da, u32 va, size_t bytes, u32 flags) { @@ -645,6 +672,25 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da, struct iovmm_device *iovmm_obj = obj->iovmm; u32 pa_align, da_align, size_align, tmp_addr; int err; + int i, num_of_pages; + struct page *pg; + + if (flags == IOVMF_DA_PHYS) { + /* Calculate the page-aligned PA, VA and size */ + pa_align = round_down((u32) va, PAGE_SIZE); + size_align = round_up(bytes + va - pa_align, PAGE_SIZE); + da_align = user_va2_pa(current->mm, va); + *da = (da_align | (va & (PAGE_SIZE - 1))); + dmm_obj = add_mapping_info(obj, NULL, va, da_align, + size_align); + num_of_pages = size_align/PAGE_SIZE; + for (i = 0; i < num_of_pages; i++) { + pg = phys_to_page(da_align); + da_align += PAGE_SIZE; + dmm_obj->pages[i] = pg; + } + return 0; + } list_for_each_entry(pool, &iovmm_obj->mmap_pool, list) { if (pool->pool_id == pool_id) { diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index 98b694b1cbe..c13d7a93797 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c @@ -100,6 +100,11 @@ err_out: static 
int omap_delete_vmm_pool(struct iodmm_struct *obj, int pool_id) { + /* + TBD: This is broken due to unmap not working + Enable this once unmap is working + */ +#if 0 struct iovmm_pool *pool; struct iovmm_device *iovmm_obj = obj->iovmm; struct list_head *_pool, *_next_pool; @@ -114,6 +119,8 @@ static int omap_delete_vmm_pool(struct iodmm_struct *obj, int pool_id) } } return -ENODEV; +#endif + return 0; } static int omap_iovmm_ioctl(struct inode *inode, struct file *filp, |