author      Hari Kanigeri <h-kanigeri2@ti.com>      2011-04-07 08:43:27 +0100
committer   Andy Green <andy.green@linaro.org>      2011-04-07 08:43:27 +0100
commit      9dbc9bff6359b6592e1d4caa57a728dd291e2805
tree        17b0ea7d8764730e0315811cd332201a26b68b26
parent      f554e93a6aa4225e2f61de4030b94a34ffc20c53
omap: iommu - Initial support for DMM using iommu
omap: iommu - ported dspbridge's DMM code to iommu
This patch ports the dspbridge's DMM code to the iommu module.
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
Signed-off-by: Ramesh Gupta <grgupta@ti.com>
omap: iommu - added cache flushing operation for L2 cache
Signed-off-by: Ramesh Gupta <grgupta@ti.com>
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
omap: iovmm - add userspace interface to DMM
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
Signed-off-by: Ramesh Gupta <grgupta@ti.com>
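The interface added by this change is driven from userspace through the new iovmm character device. Below is a minimal, hypothetical sketch of that flow, assuming udev creates a /dev/iovmm-omap0 node for the device this patch registers and that the ioctl numbers and structure layouts from plat/iovmm.h are mirrored in a local userspace header (the patch does not export them as UAPI). The pool geometry is illustrative, and flags is left at 0 where a real caller would pass one of the kernel's IOVMF_DA_* selectors to choose the mapping type and request the device-address write-back.

/*
 * Hypothetical userspace sketch: create a DMM pool, then map one page of
 * user memory into it. Constants and structures are mirrored by hand from
 * plat/iovmm.h; device node name and pool addresses are assumptions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define IOVMM_IOC_MAGIC     'V'
#define IOVMM_IOCMEMMAP     _IO(IOVMM_IOC_MAGIC, 1)
#define IOVMM_IOCCREATEPOOL _IO(IOVMM_IOC_MAGIC, 6)

struct iovmm_pool_info {
	uint32_t pool_id;
	uint32_t da_begin;
	uint32_t da_end;
	uint32_t size;
	uint32_t flags;
};

struct dmm_map_info {
	uint32_t mpu_addr;   /* user virtual address of the buffer */
	uint32_t *da;        /* device address written back by the driver */
	uint32_t num_of_buf;
	uint32_t size;
	uint32_t mem_pool_id;
	uint32_t flags;      /* IOVMF_DA_* selector in a real caller */
};

int main(void)
{
	struct iovmm_pool_info pool = {
		.pool_id  = 0,
		.da_begin = 0x60000000,          /* example device addresses */
		.da_end   = 0x60100000,
		.size     = 0x100000,
	};
	uint32_t da = 0;
	void *buf = NULL;
	int fd;

	fd = open("/dev/iovmm-omap0", O_RDWR);   /* node name is an assumption */
	if (fd < 0)
		return 1;

	if (ioctl(fd, IOVMM_IOCCREATEPOOL, &pool) < 0)
		goto out;

	/* Map one page; the driver pins the user pages and programs the IOMMU. */
	if (posix_memalign(&buf, 4096, 4096))
		goto out;
	struct dmm_map_info mi = {
		.mpu_addr    = (uint32_t)(uintptr_t)buf,
		.da          = &da,
		.size        = 4096,
		.mem_pool_id = pool.pool_id,
		.flags       = 0,
	};
	if (ioctl(fd, IOVMM_IOCMEMMAP, &mi) == 0)
		printf("mapped at device address 0x%08x\n", da);
out:
	free(buf);
	close(fd);
	return 0;
}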
omap: iommu - event notification to userspace
Implement iommu event notifications to userspace using eventfd
Signed-off-by: Hari Kanigeri <h-kanigeri2@ti.com>
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
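A hedged sketch of how a userspace monitor might consume these notifications: create an eventfd, register its file descriptor with the driver, then block on a read. The ioctl numbers are mirrored from plat/iovmm.h and the device node name is an assumption; on the kernel side iommu_fault_handler() calls eventfd_signal(), so the read returns the count of faults signalled since the last read.

/*
 * Hypothetical userspace sketch of the eventfd notification path added here.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define IOVMM_IOC_MAGIC     'V'
#define IOMMU_IOCEVENTREG   _IO(IOVMM_IOC_MAGIC, 10)
#define IOMMU_IOCEVENTUNREG _IO(IOVMM_IOC_MAGIC, 11)

int main(void)
{
	uint64_t count;
	int dev, efd;

	dev = open("/dev/iovmm-omap0", O_RDWR);  /* node name is an assumption */
	efd = eventfd(0, 0);
	if (dev < 0 || efd < 0)
		return 1;

	/* The driver keeps (fd, eventfd_ctx) on the iommu's event_list and
	 * signals the context from its MMU fault handler. */
	if (ioctl(dev, IOMMU_IOCEVENTREG, &efd) < 0)
		return 1;

	/* Blocks until at least one fault event has been signalled. */
	if (read(efd, &count, sizeof(count)) == sizeof(count))
		printf("iommu reported %llu fault event(s)\n",
		       (unsigned long long)count);

	ioctl(dev, IOMMU_IOCEVENTUNREG, &efd);
	close(efd);
	close(dev);
	return 0;
}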
IODMM: add support for unmap functionality
Adds unmap functionality and adds the map information
of loadable sections to the mapped list.
Signed-off-by: Ramesh Gupta G <grgupta@ti.com>
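Unmapping mirrors the map path: IOVMM_IOCMEMUNMAP takes only the device address, and the driver recovers the size from its cached dmm_map_object before clearing the page-table entries and the mapping-info record. A small hypothetical helper, again assuming locally mirrored ioctl definitions:

#include <stdint.h>
#include <sys/ioctl.h>

#define IOVMM_IOC_MAGIC   'V'
#define IOVMM_IOCMEMUNMAP _IO(IOVMM_IOC_MAGIC, 2)

/* iovmm_fd: open handle on the iovmm device; da: device address returned by
 * IOVMM_IOCMEMMAP. The driver rounds da down to a page boundary and looks
 * up the containing mapping itself. */
static int dmm_unmap(int iovmm_fd, uint32_t da)
{
	return ioctl(iovmm_fd, IOVMM_IOCMEMUNMAP, &da);
}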
-rw-r--r--  arch/arm/mach-omap2/omap-iommu.c              5
-rw-r--r--  arch/arm/plat-omap/Kconfig                    6
-rw-r--r--  arch/arm/plat-omap/Makefile                   4
-rw-r--r--  arch/arm/plat-omap/include/plat/iommu.h      16
-rw-r--r--  arch/arm/plat-omap/include/plat/iommu2.h      5
-rw-r--r--  arch/arm/plat-omap/include/plat/iovmm.h     107
-rw-r--r--  arch/arm/plat-omap/iodmm.c                  753
-rw-r--r--  arch/arm/plat-omap/iommu.c                   38
-rw-r--r--  arch/arm/plat-omap/iovmm.c                  372
9 files changed, 1280 insertions, 26 deletions
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index 78f49e8996e..925fc2252d3 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -122,6 +122,11 @@ static int __init omap_iommu_init(void) ohl, ohl_cnt, false); WARN(IS_ERR(od), "Could not build omap_device" "for %s %s\n", "omap-iommu", data->oh_name); + od = omap_device_build("omap-iovmm", i, oh, + data, sizeof(*data), + ohl, ohl_cnt, false); + WARN(IS_ERR(od), "Could not build omap_device" + "for %s %s\n", "omap-iovmm", data->oh_name); } return 0; } diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index e38aaba7b89..d65a5b2f143 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -141,6 +141,12 @@ config OMAP_REMOTE_PROC config OMAP_IOMMU tristate + select GENERIC_ALLOCATOR + +config OMAP_IOVMM + depends on OMAP_IOMMU + select GENERIC_ALLOCATOR + tristate config OMAP_USER_DMM tristate diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile index 325febd479f..6c8416bd697 100644 --- a/arch/arm/plat-omap/Makefile +++ b/arch/arm/plat-omap/Makefile @@ -18,7 +18,9 @@ obj-$(CONFIG_ARCH_OMAP3) += omap_device.o obj-$(CONFIG_ARCH_OMAP4) += omap_device.o obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o -obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o +obj-$(CONFIG_OMAP_IOMMU) += iommu.o +obj-y += iovmm.o +obj-y += iodmm.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o obj-$(CONFIG_OMAP_USER_DMM) += dmm_user.o iodmm.o diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h index d3bf263fd54..1959ecbafb1 100644 --- a/arch/arm/plat-omap/include/plat/iommu.h +++ b/arch/arm/plat-omap/include/plat/iommu.h @@ -13,6 +13,8 @@ #ifndef __MACH_IOMMU_H #define __MACH_IOMMU_H +#include <linux/list.h> + struct iotlb_entry { u32 da; u32 pa; @@ -41,17 +43,16 @@ struct iommu { */ u32 *iopgd; spinlock_t page_table_lock; /* protect iopgd */ - int nr_tlb_entries; struct list_head mmap; struct mutex mmap_lock; /* protect mmap */ - int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv); - void *ctx; /* iommu context: registres saved area */ u32 da_start; u32 da_end; + struct platform_device *pdev; + struct list_head event_list; }; struct cr_regs { @@ -71,6 +72,12 @@ struct cr_regs { }; }; +struct iommu_event_ntfy { + u32 fd; + struct eventfd_ctx *evt_ctx; + struct list_head list; +}; + struct iotlb_lock { short base; short vict; @@ -166,6 +173,7 @@ extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e); extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte); extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova); +extern void iopgtable_clear_entry_all(struct iommu *obj); extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end); extern struct iommu *iommu_get(const char *name); @@ -186,5 +194,5 @@ extern int foreach_iommu_device(void *data, extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len); extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len); - +extern int iommu_get_plat_data_size(void); #endif /* __MACH_IOMMU_H */ diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h index 10ad05f410e..9b5c4c9f9e6 100644 --- a/arch/arm/plat-omap/include/plat/iommu2.h +++ b/arch/arm/plat-omap/include/plat/iommu2.h @@ -42,6 +42,11 @@ /* * MMU Register bit definitions */ +#define PAGE_SIZE_4KB 0x1000 +#define PAGE_SIZE_64KB 0x10000 +#define PAGE_SIZE_1MB 0x100000 +#define 
PAGE_SIZE_16MB 0x1000000 + #define MMU_LOCK_BASE_SHIFT 10 #define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) #define MMU_LOCK_BASE(x) \ diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h index 32a2f6c4d39..267aefaf52f 100644 --- a/arch/arm/plat-omap/include/plat/iovmm.h +++ b/arch/arm/plat-omap/include/plat/iovmm.h @@ -9,18 +9,106 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include <linux/device.h> +#include <linux/cdev.h> +#include <linux/dma-mapping.h> #ifndef __IOMMU_MMAP_H #define __IOMMU_MMAP_H + +#define IOVMM_IOC_MAGIC 'V' + +#define IOVMM_IOCSETTLBENT _IO(IOVMM_IOC_MAGIC, 0) +#define IOVMM_IOCMEMMAP _IO(IOVMM_IOC_MAGIC, 1) +#define IOVMM_IOCMEMUNMAP _IO(IOVMM_IOC_MAGIC, 2) +#define IOVMM_IOCDATOPA _IO(IOVMM_IOC_MAGIC, 3) +#define IOVMM_IOCMEMFLUSH _IO(IOVMM_IOC_MAGIC, 4) +#define IOVMM_IOCMEMINV _IO(IOVMM_IOC_MAGIC, 5) +#define IOVMM_IOCCREATEPOOL _IO(IOVMM_IOC_MAGIC, 6) +#define IOVMM_IOCDELETEPOOL _IO(IOVMM_IOC_MAGIC, 7) +#define IOVMM_IOCSETPTEENT _IO(IOVMM_IOC_MAGIC, 8) +#define IOVMM_IOCCLEARPTEENTRIES _IO(IOVMM_IOC_MAGIC, 9) +#define IOMMU_IOCEVENTREG _IO(IOVMM_IOC_MAGIC, 10) +#define IOMMU_IOCEVENTUNREG _IO(IOVMM_IOC_MAGIC, 11) + + +struct iovmm_pool { + u32 pool_id; + u32 da_begin; + u32 da_end; + struct gen_pool *genpool; + struct list_head list; +}; + +struct iovmm_pool_info { + u32 pool_id; + u32 da_begin; + u32 da_end; + u32 size; + u32 flags; +}; + +/* used to cache dma mapping information */ +struct device_dma_map_info { + /* direction of DMA in action, or DMA_NONE */ + enum dma_data_direction dir; + /* number of elements requested by us */ + int num_pages; + /* number of elements returned from dma_map_sg */ + int sg_num; + /* list of buffers used in this DMA action */ + struct scatterlist *sg; +}; + +struct dmm_map_info { + u32 mpu_addr; + u32 *da; + u32 num_of_buf; + u32 size; + u32 mem_pool_id; + u32 flags; +}; + +struct dmm_map_object { + struct list_head link; + u32 da; + u32 va; + u32 size; + u32 num_usr_pgs; + struct gen_pool *gen_pool; + struct page **pages; + struct device_dma_map_info dma_info; +}; + +struct iodmm_struct { + struct iovmm_device *iovmm; + struct list_head map_list; + u32 pool_id; + spinlock_t dmm_map_lock; +}; + +struct iovmm_device { + /* iommu object which this belongs to */ + struct iommu *iommu; + const char *name; + /* List of memory pool it manages */ + struct list_head mmap_pool; + int minor; + struct cdev cdev; +}; + struct iovm_struct { struct iommu *iommu; /* iommu object which this belongs to */ + const char *name; u32 da_start; /* area definition */ u32 da_end; u32 flags; /* IOVMF_: see below */ struct list_head list; /* linked in ascending order */ const struct sg_table *sgt; /* keep 'page' <-> 'da' mapping */ void *va; /* mpu side mapped address */ + int minor; + struct cdev cdev; }; /* @@ -71,7 +159,14 @@ struct iovm_struct { #define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT)) #define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) +#define IOVMF_DA_ANON (2 << (4 + IOVMF_SW_SHIFT)) +#define IOVMF_DA_MASK (3 << (4 + IOVMF_SW_SHIFT)) +#define IOVMF_DA_PHYS (4 << (4 + IOVMF_SW_SHIFT)) +#define IOVMF_DA_USER (5 << (4 + IOVMF_SW_SHIFT)) +struct iovmm_platform_data { + const char *name; +}; extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da); extern u32 iommu_vmap(struct iommu *obj, u32 da, @@ -88,5 +183,17 @@ extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, extern void 
iommu_kfree(struct iommu *obj, u32 da); extern void *da_to_va(struct iommu *obj, u32 da); +/* user dmm functions */ +extern int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da, + u32 va, size_t bytes, u32 flags); +extern struct dmm_map_object *add_mapping_info(struct iodmm_struct *obj, + struct gen_pool *gen_pool, u32 va, u32 da, u32 size); +extern int device_flush_memory(struct iodmm_struct *obj, void *pva, + u32 ul_size, u32 ul_flags); +extern int device_invalidate_memory(struct iodmm_struct *obj, void *pva, + u32 size); +extern void user_remove_resources(struct iodmm_struct *obj); +extern int user_un_map(struct iodmm_struct *obj, u32 map_addr); + #endif /* __IOMMU_MMAP_H */ diff --git a/arch/arm/plat-omap/iodmm.c b/arch/arm/plat-omap/iodmm.c new file mode 100644 index 00000000000..f555c83f518 --- /dev/null +++ b/arch/arm/plat-omap/iodmm.c @@ -0,0 +1,753 @@ +/* + * OMAP DMM (Dynamic memory mapping) to IOMMU module + * + * Copyright (C) 2010 Texas Instruments. All rights reserved. + * + * Authors: Ramesh Gupta <grgupta@ti.com> + * Hari Kanigeri <h-kanigeri2@ti.com> + * Ohad Ben-Cohen <ohad@wizery.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/scatterlist.h> +#include <linux/platform_device.h> +#include <asm/cacheflush.h> +#include <asm/mach/map.h> + +#include <plat/iommu.h> +#include <plat/iovmm.h> + +#include "iopgtable.h" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/swap.h> +#include <linux/genalloc.h> + + +#define POOL_NONE -1 + +/* remember mapping information */ +struct dmm_map_object *add_mapping_info(struct iodmm_struct *obj, + struct gen_pool *gen_pool, u32 va, u32 da, u32 size) +{ + struct dmm_map_object *map_obj; + + u32 num_usr_pgs = size / PAGE_SIZE; + + pr_debug("%s: adding map info: va 0x%x virt 0x%x size 0x%x\n", + __func__, va, + da, size); + map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL); + if (!map_obj) { + pr_err("%s: kzalloc failed\n", __func__); + return NULL; + } + INIT_LIST_HEAD(&map_obj->link); + + map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *), + GFP_KERNEL); + if (!map_obj->pages) { + pr_err("%s: kzalloc failed\n", __func__); + kfree(map_obj); + return NULL; + } + + map_obj->va = va; + map_obj->da = da; + map_obj->size = size; + map_obj->num_usr_pgs = num_usr_pgs; + map_obj->gen_pool = gen_pool; + spin_lock(&obj->dmm_map_lock); + list_add(&map_obj->link, &obj->map_list); + spin_unlock(&obj->dmm_map_lock); + + return map_obj; +} + +static int match_exact_map_obj(struct dmm_map_object *map_obj, + u32 da, u32 size) +{ + u32 res; + + if (map_obj->da == da && map_obj->size != size) + pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", + __func__, da, map_obj->size, size); + + if 
(map_obj->da == da && map_obj->size == size) + res = 0; + else + res = -ENODATA; + return res; +} + +static void remove_mapping_information(struct iodmm_struct *obj, + u32 da, u32 size) +{ + struct dmm_map_object *map_obj; + + pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, + da, size); + spin_lock(&obj->dmm_map_lock); + list_for_each_entry(map_obj, &obj->map_list, link) { + pr_debug("%s: candidate: va 0x%x virt 0x%x size 0x%x\n", + __func__, + map_obj->va, + map_obj->da, + map_obj->size); + + if (!match_exact_map_obj(map_obj, da, size)) { + pr_debug("%s: match, deleting map info\n", __func__); + if (map_obj->gen_pool != POOL_NONE) + gen_pool_free(map_obj->gen_pool, da, size); + list_del(&map_obj->link); + kfree(map_obj->dma_info.sg); + kfree(map_obj->pages); + kfree(map_obj); + goto out; + } + pr_debug("%s: candidate didn't match\n", __func__); + } + + pr_err("%s: failed to find given map info\n", __func__); +out: + spin_unlock(&obj->dmm_map_lock); +} + +static int match_containing_map_obj(struct dmm_map_object *map_obj, + u32 va, u32 size) +{ + u32 res; + u32 map_obj_end = map_obj->va + map_obj->size; + + if ((va >= map_obj->va) && (va + size <= map_obj_end)) + res = 0; + else + res = -ENODATA; + + return res; +} + +static struct dmm_map_object *find_containing_mapping( + struct iodmm_struct *obj, + u32 va, u32 size) +{ + struct dmm_map_object *map_obj, *temp_map; + pr_debug("%s: looking for va 0x%x size 0x%x\n", __func__, + va, size); + + spin_lock(&obj->dmm_map_lock); + list_for_each_entry_safe(map_obj, temp_map, &obj->map_list, link) { + pr_debug("%s: candidate: va 0x%x virt 0x%x size 0x%x\n", + __func__, + map_obj->va, + map_obj->da, + map_obj->size); + if (!match_containing_map_obj(map_obj, va, map_obj->size)) { + pr_debug("%s: match!\n", __func__); + goto out; + } + + pr_debug("%s: no match!\n", __func__); + } + + map_obj = NULL; +out: + spin_unlock(&obj->dmm_map_lock); + return map_obj; +} + +static int find_first_page_in_cache(struct dmm_map_object *map_obj, + unsigned long va) +{ + u32 mapped_base_page = map_obj->va >> PAGE_SHIFT; + u32 requested_base_page = va >> PAGE_SHIFT; + int pg_index = requested_base_page - mapped_base_page; + + if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) { + pr_err("%s: failed (got %d)\n", __func__, pg_index); + return -1; + } + + pr_debug("%s: first page is %d\n", __func__, pg_index); + return pg_index; +} + +static inline struct page *get_mapping_page(struct dmm_map_object *map_obj, + int pg_i) +{ + pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__, + pg_i, map_obj->num_usr_pgs); + + if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) { + pr_err("%s: requested pg_i %d is out of mapped range\n", + __func__, pg_i); + return NULL; + } + + return map_obj->pages[pg_i]; +} + +/* Cache operation against kernel address instead of users */ +static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start, + ssize_t len, int pg_i) +{ + struct page *page; + unsigned long offset; + ssize_t rest; + int ret = 0, i = 0; + struct scatterlist *sg = map_obj->dma_info.sg; + + while (len) { + page = get_mapping_page(map_obj, pg_i); + if (!page) { + pr_err("%s: no page for %08lx\n", __func__, start); + ret = -EINVAL; + goto out; + } else if (IS_ERR(page)) { + pr_err("%s: err page for %08lx(%lu)\n", __func__, start, + PTR_ERR(page)); + ret = PTR_ERR(page); + goto out; + } + + offset = start & ~PAGE_MASK; + rest = min_t(ssize_t, PAGE_SIZE - offset, len); + + sg_set_page(&sg[i], page, rest, offset); + + len -= rest; + start += rest; + 
pg_i++, i++; + } + + if (i != map_obj->dma_info.num_pages) { + pr_err("%s: bad number of sg iterations\n", __func__); + ret = -EFAULT; + goto out; + } + +out: + return ret; +} + +static int memory_regain_ownership(struct device *dev, + struct dmm_map_object *map_obj, unsigned long start, + ssize_t len, enum dma_data_direction dir) +{ + int ret = 0; + unsigned long first_data_page = start >> PAGE_SHIFT; + unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); + /* calculating the number of pages this area spans */ + unsigned long num_pages = last_data_page - first_data_page + 1; + struct device_dma_map_info *dma_info = &map_obj->dma_info; + + if (!dma_info->sg) + goto out; + + if (dma_info->dir != dir || dma_info->num_pages != num_pages) { + pr_err("%s: dma info doesn't match given params\n", __func__); + return -EINVAL; + } + + dma_unmap_sg(dev, dma_info->sg, num_pages, dma_info->dir); + + pr_debug("%s: dma_map_sg unmapped\n", __func__); + + kfree(dma_info->sg); + + map_obj->dma_info.sg = NULL; + +out: + return ret; +} + +/* Cache operation against kernel address instead of users */ +static int memory_give_ownership(struct device *dev, + struct dmm_map_object *map_obj, unsigned long start, + ssize_t len, enum dma_data_direction dir) +{ + int pg_i, ret, sg_num; + struct scatterlist *sg; + unsigned long first_data_page = start >> PAGE_SHIFT; + unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); + /* calculating the number of pages this area spans */ + unsigned long num_pages = last_data_page - first_data_page + 1; + + pg_i = find_first_page_in_cache(map_obj, start); + if (pg_i < 0) { + pr_err("%s: failed to find first page in cache\n", __func__); + ret = -EINVAL; + goto out; + } + + sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL); + if (!sg) { + pr_err("%s: kcalloc failed\n", __func__); + ret = -ENOMEM; + goto out; + } + + sg_init_table(sg, num_pages); + + /* cleanup a previous sg allocation */ + /* this may happen if application doesn't signal for e/o DMA */ + kfree(map_obj->dma_info.sg); + + map_obj->dma_info.sg = sg; + map_obj->dma_info.dir = dir; + map_obj->dma_info.num_pages = num_pages; + + ret = build_dma_sg(map_obj, start, len, pg_i); + if (ret) + goto kfree_sg; + + sg_num = dma_map_sg(dev, sg, num_pages, dir); + if (sg_num < 1) { + pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num); + ret = -EFAULT; + goto kfree_sg; + } + + pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num); + map_obj->dma_info.sg_num = sg_num; + + return 0; + +kfree_sg: + kfree(sg); + map_obj->dma_info.sg = NULL; +out: + return ret; +} + +int proc_begin_dma(struct iodmm_struct *obj, void *pva, u32 ul_size, + enum dma_data_direction dir) +{ + /* Keep STATUS here for future additions to this function */ + int status = 0; + struct dmm_map_object *map_obj; + struct device *dev = obj->iovmm->iommu->dev; + + pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, + (u32)pva, + ul_size, dir); + + /* find requested memory are in cached mapping information */ + map_obj = find_containing_mapping(obj, (u32) pva, ul_size); + if (!map_obj) { + pr_err("%s: find_containing_mapping failed\n", __func__); + status = -EFAULT; + goto err_out; + } + + if (memory_give_ownership(dev, map_obj, (u32) pva, ul_size, dir)) { + pr_err("%s: InValid address parameters %p %x\n", + __func__, pva, ul_size); + status = -EFAULT; + } + +err_out: + + return status; +} + +int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size, + enum dma_data_direction dir) +{ + /* Keep STATUS here for 
future additions to this function */ + int status = 0; + struct dmm_map_object *map_obj; + struct device *dev = obj->iovmm->iommu->dev; + + pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, + (u32)pva, + ul_size, dir); + + /* find requested memory are in cached mapping information */ + map_obj = find_containing_mapping(obj, (u32) pva, ul_size); + if (!map_obj) { + pr_err("%s: find_containing_mapping failed\n", __func__); + status = -EFAULT; + goto err_out; + } + + if (memory_regain_ownership(dev, map_obj, (u32) pva, ul_size, dir)) { + pr_err("%s: InValid address parameters %p %x\n", + __func__, pva, ul_size); + status = -EFAULT; + goto err_out; + } + +err_out: + return status; +} + +/* + * ======== device_flush_memory ======== + * Purpose: + * Flush cache + */ +int device_flush_memory(struct iodmm_struct *obj, void *pva, + u32 ul_size, u32 ul_flags) +{ + enum dma_data_direction dir = DMA_BIDIRECTIONAL; + + return proc_begin_dma(obj, pva, ul_size, dir); +} + +/* + * ======== proc_invalidate_memory ======== + * Purpose: + * Invalidates the memory specified + */ +int device_invalidate_memory(struct iodmm_struct *obj, void *pva, u32 size) +{ + enum dma_data_direction dir = DMA_FROM_DEVICE; + + return proc_begin_dma(obj, pva, size, dir); +} + + +/** + * user_to_device_map() - maps user to dsp virtual address + * @mmu: Pointer to iommu handle. + * @uva: Virtual user space address. + * @da DSP address + * @size Buffer size to map. + * @usr_pgs struct page array pointer where the user pages will be stored + * + * This function maps a user space buffer into DSP virtual address. + * + */ +int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size, + struct page **usr_pgs) + +{ + int res = 0; + int w; + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + u32 pg_num; + u32 status; + int pg_i; + u32 pa; + unsigned int pages; + struct iotlb_entry tlb_entry; + + if (!size || !usr_pgs) + return -EINVAL; + + pages = size / PAGE_SIZE; + + down_read(&mm->mmap_sem); + vma = find_vma(mm, uva); + while (vma && (uva + size > vma->vm_end)) + vma = find_vma(mm, vma->vm_end + 1); + + if (!vma) { + pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", + __func__, uva, size); + up_read(&mm->mmap_sem); + res = -EINVAL; + goto end; + } + if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) + w = 1; + up_read(&mm->mmap_sem); + + for (pg_i = 0; pg_i < pages; pg_i++) { + pg_num = get_user_pages(current, mm, uva, 1, + w, 1, usr_pgs, NULL); + printk(KERN_INFO "User VA is 0x%x and Physical is 0x%xand Da" + "is 0x%x\n", uva, page_to_phys(*usr_pgs), da); + if (pg_num > 0) { + if (page_count(*usr_pgs) < 1) { + pr_err("Bad page count after doing" + "get_user_pages on" + "user buffer\n"); + break; + } + tlb_entry.pgsz = MMU_CAM_PGSZ_4K; + tlb_entry.prsvd = MMU_CAM_P; + tlb_entry.valid = MMU_CAM_V; + tlb_entry.elsz = MMU_RAM_ELSZ_8; + tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE; + tlb_entry.mixed = 0; + tlb_entry.da = da; + pa = page_to_phys(*usr_pgs); + tlb_entry.pa = (u32)pa; + iopgtable_store_entry(mmu, &tlb_entry); + da += PAGE_SIZE; + uva += PAGE_SIZE; + } else { + pr_err("DSPBRIDGE: get_user_pages FAILED," + "MPU addr = 0x%x," + "vma->vm_flags = 0x%lx," + "get_user_pages Err" + "Value = %d, Buffer" + "size=0x%x\n", uva, + vma->vm_flags, pg_num, + size); + status = -EFAULT; + break; + } + } +end: + return res; +} + +/** + * phys_to_device_map() - maps physical addr + * to dsp virtual address + * @mmu: Pointer to iommu handle. + * @uva: Virtual user space address. 
+ * @da DSP address + * @size Buffer size to map. + * @usr_pgs struct page array pointer where the user pages will be stored + * + * This function maps a user space buffer into DSP virtual address. + * + */ +int phys_to_device_map(struct iommu *mmu, u32 phys, u32 da, u32 size, + struct page **usr_pgs) +{ + int res = 0; + int w; + u32 pg_num; + u32 status; + int pg_i; + unsigned int pages; + struct iotlb_entry tlb_entry; + + if (!size || !usr_pgs) + return -EINVAL; + + pages = size / PAGE_SIZE; + + for (pg_i = 0; pg_i < pages; pg_i++) { + printk("Phys Addr is 0x%x and Da is 0x%x\n", phys, da); + tlb_entry.pgsz = MMU_CAM_PGSZ_4K; + tlb_entry.prsvd = MMU_CAM_P; + tlb_entry.valid = MMU_CAM_V; + tlb_entry.elsz = MMU_RAM_ELSZ_8; + tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE; + tlb_entry.mixed = 0; + tlb_entry.da = da; + tlb_entry.pa = (u32)phys; + iopgtable_store_entry(mmu, &tlb_entry); + da += PAGE_SIZE; + phys += PAGE_SIZE; + } + + return res; +} + + +/** + * io_to_device_map() - maps io addr + * to device virtual address + * @mmu: Pointer to iommu handle. + * @uva: Virtual user space address. + * @da DSP address + * @size Buffer size to map. + * @usr_pgs struct page array pointer where the user pages will be stored + * + * This function maps a user space buffer into DSP virtual address. + * + */ +int io_to_device_map(struct iommu *mmu, u32 io_addr, u32 da, u32 size, + struct page **usr_pgs) +{ + int res = 0; + int pg_i; + unsigned int pages; + struct iotlb_entry tlb_entry; + + if (!size || !usr_pgs) + return -EINVAL; + + pages = size / PAGE_SIZE; + + for (pg_i = 0; pg_i < pages; pg_i++) { + printk(KERN_INFO "Phys Addr is 0x%x and Da is 0x%x\n", + io_addr, da); + tlb_entry.pgsz = MMU_CAM_PGSZ_4K; + tlb_entry.prsvd = MMU_CAM_P; + tlb_entry.valid = MMU_CAM_V; + tlb_entry.elsz = MMU_RAM_ELSZ_8; + tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE; + tlb_entry.mixed = 0; + tlb_entry.da = da; + tlb_entry.pa = (u32)io_addr; + iopgtable_store_entry(mmu, &tlb_entry); + da += PAGE_SIZE; + io_addr += PAGE_SIZE; + } + + return res; +} + + +/** + * user_to_device_unmap() - unmaps DSP virtual buffer. + * @mmu: Pointer to iommu handle. + * @da DSP address + * + * This function unmaps a user space buffer into DSP virtual address. 
+ * + */ +int user_to_device_unmap(struct iommu *mmu, u32 da, unsigned size) +{ + unsigned total = size; + unsigned start = da; + + while (total > 0) { + size_t bytes; + bytes = iopgtable_clear_entry(mmu, start); + if (bytes == 0) + bytes = PAGE_SIZE; + else + dev_dbg(mmu->dev, "%s: unmap %08x(%x) %08x\n", + __func__, start, bytes); + BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); + total -= bytes; + start += bytes; + } + BUG_ON(total); + return 0; +} + +int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da, + u32 va, size_t bytes, u32 flags) +{ + bool found = false; + struct iovmm_pool *pool; + struct gen_pool *gen_pool; + struct dmm_map_object *dmm_obj; + struct iovmm_device *iovmm_obj = obj->iovmm; + u32 pa_align, da_align, size_align, tmp_addr; + int err; + + list_for_each_entry(pool, &iovmm_obj->mmap_pool, list) { + if (pool->pool_id == pool_id) { + gen_pool = pool->genpool; + found = true; + break; + } + } + if (found == false) { + err = -EINVAL; + goto err; + } + da_align = gen_pool_alloc(gen_pool, bytes); + + /* Calculate the page-aligned PA, VA and size */ + pa_align = round_down((u32) va, PAGE_SIZE); + size_align = round_up(bytes + va - pa_align, PAGE_SIZE); + + /* Mapped address = MSB of VA | LSB of PA */ + tmp_addr = (da_align | ((u32) va & (PAGE_SIZE - 1))); + dmm_obj = add_mapping_info(obj, gen_pool, pa_align, tmp_addr, + size_align); + if (!dmm_obj) + goto err; + if (flags & IOVMF_DA_PHYS) { + err = phys_to_device_map(iovmm_obj->iommu, pa_align, + da, size_align, dmm_obj->pages); + } else if (flags & IOVMF_DA_ANON) { + err = io_to_device_map(iovmm_obj->iommu, pa_align, + da, size_align, dmm_obj->pages); + } else { + err = user_to_device_map(iovmm_obj->iommu, pa_align, + da_align, size_align, dmm_obj->pages); + } + if ((!err) && (flags & IOVMF_DA_USER)) + *da = tmp_addr; + + return 0; + +err: + return err; +} + + +/* + * ======== proc_un_map ======== + * Purpose: + * Removes a MPU buffer mapping from the DSP address space. + */ +int user_un_map(struct iodmm_struct *obj, u32 map_addr) +{ + int status = 0; + u32 va_align; + u32 size_align; + struct dmm_map_object *map_obj; + va_align = round_down(map_addr, PAGE_SIZE); + + /* + * Update DMM structures. Get the size to unmap. + * This function returns error if the VA is not mapped + */ + /* find requested memory are in cached mapping information */ + map_obj = find_containing_mapping(obj, (u32)va_align, 0); + if (!map_obj) + goto err; + size_align = map_obj->size; + /* Remove mapping from the page tables. */ + status = user_to_device_unmap(obj->iovmm->iommu, va_align, + size_align); + if (status) + goto err; + /* + * A successful unmap should be followed by removal of map_obj + * from dmm_map_list, so that mapped memory resource tracking + * remains uptodate + */ + remove_mapping_information(obj, map_obj->da, map_obj->size); + return 0; +err: + return status; +} + +void user_remove_resources(struct iodmm_struct *obj) +{ + + struct dmm_map_object *temp_map, *map_obj; + int status = 0; + + /* Free DMM mapped memory resources */ + list_for_each_entry_safe(map_obj, temp_map, &obj->map_list, link) { + status = user_un_map(obj, map_obj->va); + if (status) { + pr_err("%s: proc_un_map failed!" 
+ " status = 0x%x\n", __func__, status); + } + } +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Userspace DMM to IOMMU"); +MODULE_AUTHOR("Hari Kanigeri"); +MODULE_AUTHOR("Ramesh Gupta"); +MODULE_AUTHOR("Ohad Ben-Cohen"); diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c index 966e9e1e11a..65e88f53411 100644 --- a/arch/arm/plat-omap/iommu.c +++ b/arch/arm/plat-omap/iommu.c @@ -16,8 +16,8 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/ioport.h> -#include <linux/clk.h> #include <linux/platform_device.h> +#include <linux/eventfd.h> #include <asm/cacheflush.h> @@ -261,9 +261,8 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) } cr = iotlb_alloc_cr(obj, e); - if (IS_ERR(cr)) { + if (IS_ERR(cr)) return PTR_ERR(cr); - } iotlb_load_cr(obj, cr); kfree(cr); @@ -434,22 +433,15 @@ EXPORT_SYMBOL_GPL(foreach_iommu_device); */ static void flush_iopgd_range(u32 *first, u32 *last) { - /* FIXME: L2 cache should be taken care of if it exists */ - do { - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd" - : : "r" (first)); - first += L1_CACHE_BYTES / sizeof(*first); - } while (first <= last); + dmac_flush_range(first, last); + outer_flush_range(virt_to_phys(first), virt_to_phys(last)); } + static void flush_iopte_range(u32 *first, u32 *last) { - /* FIXME: L2 cache should be taken care of if it exists */ - do { - asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte" - : : "r" (first)); - first += L1_CACHE_BYTES / sizeof(*first); - } while (first <= last); + dmac_flush_range(first, last); + outer_flush_range(virt_to_phys(first), virt_to_phys(last)); } static void iopte_free(u32 *iopte) @@ -713,7 +705,7 @@ size_t iopgtable_clear_entry(struct iommu *obj, u32 da) } EXPORT_SYMBOL_GPL(iopgtable_clear_entry); -static void iopgtable_clear_entry_all(struct iommu *obj) +void iopgtable_clear_entry_all(struct iommu *obj) { int i; @@ -740,6 +732,15 @@ static void iopgtable_clear_entry_all(struct iommu *obj) spin_unlock(&obj->page_table_lock); } +EXPORT_SYMBOL_GPL(iopgtable_clear_entry_all); + +void eventfd_notification(struct iommu *obj) +{ + struct iommu_event_ntfy *fd_reg; + + list_for_each_entry(fd_reg, &obj->event_list, list) + eventfd_signal(fd_reg->evt_ctx, 1); +} /* * Device IOMMU generic operations @@ -753,6 +754,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) if (!obj->refcount) return IRQ_NONE; + eventfd_notification(obj); /* Dynamic loading TLB or PTE */ if (obj->isr) err = obj->isr(obj); @@ -839,6 +841,7 @@ struct iommu *iommu_get(const char *name) if (!try_module_get(obj->owner)) goto err_module; + iommu_set_twl(obj, true); mutex_unlock(&obj->iommu_lock); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); return obj; @@ -929,6 +932,8 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev) spin_lock_init(&obj->page_table_lock); INIT_LIST_HEAD(&obj->mmap); + INIT_LIST_HEAD(&obj->event_list); + obj->regbase = pdata->io_base; err = request_irq(pdata->irq, iommu_fault_handler, IRQF_SHARED, @@ -988,6 +993,7 @@ static void iopte_cachep_ctor(void *iopte) clean_dcache_area(iopte, IOPTE_TABLE_SIZE); } + static int __init omap_iommu_init(void) { struct kmem_cache *p; diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index 51ef43e8def..a2ad64ab82e 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c @@ -15,7 +15,8 @@ #include <linux/vmalloc.h> #include <linux/device.h> #include <linux/scatterlist.h> - +#include <linux/platform_device.h> +#include <linux/eventfd.h> #include <asm/cacheflush.h> #include 
<asm/mach/map.h> @@ -24,6 +25,15 @@ #include "iopgtable.h" +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/swap.h> +#include <linux/genalloc.h> + + /* * A device driver needs to create address mappings between: * @@ -58,8 +68,336 @@ * '*': not yet, but feasible. */ +#define OMAP_IOVMM_NAME "iovmm-omap" + +static atomic_t num_of_iovmmus; +static struct class *omap_iovmm_class; +static dev_t omap_iovmm_dev; static struct kmem_cache *iovm_area_cachep; +static int omap_create_vmm_pool(struct iodmm_struct *obj, int pool_id, int size, + int sa) +{ + struct iovmm_pool *pool; + struct iovmm_device *iovmm = obj->iovmm; + + pool = kzalloc(sizeof(struct iovmm_pool), GFP_ATOMIC); + if (!pool) + goto err_out; + + pool->pool_id = pool_id; + pool->da_begin = sa; + pool->da_end = sa + size; + pool->genpool = gen_pool_create(10, -1); + gen_pool_add(pool->genpool, pool->da_begin, size, -1); + INIT_LIST_HEAD(&pool->list); + list_add_tail(&pool->list, &iovmm->mmap_pool); + return 0; + +err_out: + return -ENOMEM; +} + +static int omap_delete_vmm_pool(struct iovm_struct *obj, int pool_id, int size) +{ +/*FIX ME: ADD CODE HERE*/ + return 0; +} + +static int omap_iovmm_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long args) +{ + struct iodmm_struct *obj; + int ret = 0; + obj = (struct iodmm_struct *)filp->private_data; + + if (!obj) + return -EINVAL; + + if (_IOC_TYPE(cmd) != IOVMM_IOC_MAGIC) + return -ENOTTY; + + switch (cmd) { + case IOVMM_IOCSETTLBENT: + { + struct iotlb_entry e; + int size; + size = copy_from_user(&e, (void __user *)args, + sizeof(struct iotlb_entry)); + if (size) { + ret = -EINVAL; + goto err_user_buf; + } + load_iotlb_entry(obj->iovmm->iommu, &e); + break; + } + case IOVMM_IOCSETPTEENT: + { + + struct iotlb_entry e; + int size; + int page_sz; + struct dmm_map_object *dmm_obj; + size = copy_from_user(&e, (void __user *)args, + sizeof(struct iotlb_entry)); + if (size) { + ret = -EINVAL; + goto err_user_buf; + } + page_sz = e.pgsz; + switch (page_sz) { + case MMU_CAM_PGSZ_16M: + size = PAGE_SIZE_16MB; + break; + case MMU_CAM_PGSZ_1M: + size = PAGE_SIZE_1MB; + break; + case MMU_CAM_PGSZ_64K: + size = PAGE_SIZE_64KB; + break; + case MMU_CAM_PGSZ_4K: + size = PAGE_SIZE_4KB; + break; + default: + size = 0; + goto err_user_buf; + break; + } + dmm_obj = add_mapping_info(obj, -1, e.pa, e.da, size); + + iopgtable_store_entry(obj->iovmm->iommu, &e); + break; + } + + case IOVMM_IOCCLEARPTEENTRIES: + { + iopgtable_clear_entry_all(obj->iovmm->iommu); + flush_iotlb_all(obj->iovmm->iommu); + break; + } + + case IOVMM_IOCCREATEPOOL: + { + struct iovmm_pool_info pool_info; + int size; + + size = copy_from_user(&pool_info, (void __user *)args, + sizeof(struct iovmm_pool_info)); + if (size) { + ret = -EINVAL; + goto err_user_buf; + } + omap_create_vmm_pool(obj, pool_info.pool_id, pool_info.size, + pool_info.da_begin); + break; + } + case IOVMM_IOCMEMMAP: + { + struct dmm_map_info map_info; + int size; + int status; + + size = copy_from_user(&map_info, (void __user *)args, + sizeof(struct dmm_map_info)); + + status = dmm_user(obj, map_info.mem_pool_id, + map_info.da, map_info.mpu_addr, + map_info.size, map_info.flags); + copy_to_user((void __user *)args, &map_info, + sizeof(struct dmm_map_info)); + ret = status; + break; + } + case IOVMM_IOCMEMUNMAP: + { + u32 da; + int size; + int status; + + size = copy_from_user(&da, (void __user *)args, sizeof(u32)); + if (size) { + ret = -EINVAL; 
+ goto err_user_buf; + } + status = user_un_map(obj, da); + ret = status; + break; + } + case IOMMU_IOCEVENTREG: + { + int fd; + int size; + struct iommu_event_ntfy *fd_reg; + + size = copy_from_user(&fd, (void __user *)args, sizeof(int)); + if (size) { + ret = -EINVAL; + goto err_user_buf; + } + + fd_reg = kzalloc(sizeof(struct iommu_event_ntfy), GFP_KERNEL); + fd_reg->fd = fd; + fd_reg->evt_ctx = eventfd_ctx_fdget(fd); + INIT_LIST_HEAD(&fd_reg->list); + list_add_tail(&fd_reg->list, &obj->iovmm->iommu->event_list); + break; + } + case IOMMU_IOCEVENTUNREG: + { + int fd; + int size; + struct iommu_event_ntfy *fd_reg, *temp_reg; + + size = copy_from_user(&fd, (void __user *)args, sizeof(int)); + if (size) { + ret = -EINVAL; + goto err_user_buf; + } + /* Free DMM mapped memory resources */ + list_for_each_entry_safe(fd_reg, temp_reg, + &obj->iovmm->iommu->event_list, list) { + if (fd_reg->fd == fd) { + list_del(&fd_reg->list); + kfree(fd_reg); + } + } + break; + } + case IOVMM_IOCDATOPA: + case IOVMM_IOCMEMFLUSH: + case IOVMM_IOCMEMINV: + case IOVMM_IOCDELETEPOOL: + default: + return -ENOTTY; + } +err_user_buf: + return ret; + +} + +static int omap_iovmm_open(struct inode *inode, struct file *filp) +{ + struct iodmm_struct *iodmm; + struct iovmm_device *obj; + + obj = container_of(inode->i_cdev, struct iovmm_device, cdev); + + iodmm = kzalloc(sizeof(struct iodmm_struct), GFP_KERNEL); + INIT_LIST_HEAD(&iodmm->map_list); + spin_lock_init(&iodmm->dmm_map_lock); + + iodmm->iovmm = obj; + obj->iommu = iommu_get(obj->name); + filp->private_data = iodmm; + + return 0; + +} +static int omap_iovmm_release(struct inode *inode, struct file *filp) +{ + int status = 0; + struct iodmm_struct *obj; + + if (!filp->private_data) { + status = -EIO; + goto err; + } + obj = filp->private_data; + flush_signals(current); + user_remove_resources(obj); + iommu_put(obj->iovmm->iommu); + kfree(obj); + filp->private_data = NULL; + +err: + return status; +} + +static const struct file_operations omap_iovmm_fops = { + .owner = THIS_MODULE, + .open = omap_iovmm_open, + .release = omap_iovmm_release, + .ioctl = omap_iovmm_ioctl, +}; + +static int __devinit omap_iovmm_probe(struct platform_device *pdev) +{ + int err = -ENODEV; + int major, minor; + struct device *tmpdev; + struct iommu_platform_data *pdata = + (struct iommu_platform_data *)pdev->dev.platform_data; + int ret = 0; + struct iovmm_device *obj; + + obj = kzalloc(sizeof(struct iovm_struct), GFP_KERNEL); + + major = MAJOR(omap_iovmm_dev); + minor = atomic_read(&num_of_iovmmus); + atomic_inc(&num_of_iovmmus); + + obj->minor = minor; + obj->name = pdata->name; + INIT_LIST_HEAD(&obj->mmap_pool); + + cdev_init(&obj->cdev, &omap_iovmm_fops); + obj->cdev.owner = THIS_MODULE; + ret = cdev_add(&obj->cdev, MKDEV(major, minor), 1); + if (ret) { + dev_err(&pdev->dev, "%s: cdev_add failed: %d\n", __func__, ret); + goto err_cdev; + } + + tmpdev = device_create(omap_iovmm_class, NULL, + MKDEV(major, minor), + NULL, + OMAP_IOVMM_NAME "%d", minor); + if (IS_ERR(tmpdev)) { + ret = PTR_ERR(tmpdev); + pr_err("%s: device_create failed: %d\n", __func__, ret); + goto clean_cdev; + } + + pr_info("%s initialized %s, major: %d, base-minor: %d\n", + OMAP_IOVMM_NAME, + pdata->name, + MAJOR(omap_iovmm_dev), + minor); + platform_set_drvdata(pdev, obj); + return 0; +clean_cdev: + cdev_del(&obj->cdev); +err_cdev: + return err; +} + +static int __devexit omap_iovmm_remove(struct platform_device *pdev) +{ + struct iovmm_device *obj = platform_get_drvdata(pdev); + int major = 
MAJOR(omap_iovmm_dev); + device_destroy(omap_iovmm_class, MKDEV(major, obj->minor)); + cdev_del(&obj->cdev); + platform_set_drvdata(pdev, NULL); + iopgtable_clear_entry_all(obj->iommu); + iommu_put(obj->iommu); + free_pages((unsigned long)obj->iommu->iopgd, + get_order(IOPGD_TABLE_SIZE)); + kfree(obj); + return 0; + +} + + + +static struct platform_driver omap_iovmm_driver = { + .probe = omap_iovmm_probe, + .remove = __devexit_p(omap_iovmm_remove), + .driver = { + .name = "omap-iovmm", + }, +}; + + /* return total bytes of sg buffers */ static size_t sgtable_len(const struct sg_table *sgt) { @@ -275,7 +613,6 @@ static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, if (!obj || !bytes) return ERR_PTR(-EINVAL); - start = da; alignment = PAGE_SIZE; @@ -290,11 +627,9 @@ static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, obj->da_end - start < bytes) { return ERR_PTR(-EINVAL); } - tmp = NULL; if (list_empty(&obj->mmap)) goto found; - prev_end = 0; list_for_each_entry(tmp, &obj->mmap, list) { @@ -884,19 +1219,46 @@ void iommu_kfree(struct iommu *obj, u32 da) } EXPORT_SYMBOL_GPL(iommu_kfree); +static int iommu_dmm(struct iodmm_struct *obj, u32 pool_id, u32 *da, + u32 va, size_t bytes, u32 flags) +{ + int err = 0; + + err = dmm_user(obj, pool_id, da, va, bytes, flags); + return err; +} static int __init iovmm_init(void) { const unsigned long flags = SLAB_HWCACHE_ALIGN; struct kmem_cache *p; + int num, ret; p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, flags, NULL); if (!p) return -ENOMEM; + iovm_area_cachep = p; + num = iommu_get_plat_data_size(); + ret = alloc_chrdev_region(&omap_iovmm_dev, 0, num, OMAP_IOVMM_NAME); + if (ret) { + pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret); + goto out; + } + omap_iovmm_class = class_create(THIS_MODULE, OMAP_IOVMM_NAME); + if (IS_ERR(omap_iovmm_class)) { + ret = PTR_ERR(omap_iovmm_class); + pr_err("%s: class_create failed: %d\n", __func__, ret); + goto unreg_region; + } + atomic_set(&num_of_iovmmus, 0); - return 0; + return platform_driver_register(&omap_iovmm_driver); +unreg_region: + unregister_chrdev_region(omap_iovmm_dev, num); +out: + return ret; } module_init(iovmm_init); |