summaryrefslogtreecommitdiff
path: root/drivers/media/video/tiler
diff options
context:
space:
mode:
authorLajos Molnar <molnar@ti.com>2011-04-07 08:41:24 +0100
committerAndy Green <andy.green@linaro.org>2011-04-07 08:41:24 +0100
commit86ff971f7ec1db3ffe0e335f662668b61779d275 (patch)
tree9d499e90aa1807bfc58132bb930f94cca483d94a /drivers/media/video/tiler
parentceda3165bc1cf6520cd9247db2cf3d75489e7aa8 (diff)
TILER: Detailed reorganization of tiler driver
Moved file operations, buffer/process management and major kernel API-s to tiler-ioctl.c. Moved tiler geometric manipulations to tiler-geom.c. Added wrappers around block manipulations so that other tiler source files can manipulate blocks without needing to share the main tiler mutex. Listed non-static files in tiler-pri.h. Signed-off-by: Lajos Molnar <molnar@ti.com> Signed-off-by: David Sin <davidsin@ti.com>
Diffstat (limited to 'drivers/media/video/tiler')
-rw-r--r--drivers/media/video/tiler/Makefile2
-rw-r--r--drivers/media/video/tiler/_tiler.h102
-rw-r--r--drivers/media/video/tiler/tiler-def.h2
-rw-r--r--drivers/media/video/tiler/tiler-geom.c43
-rw-r--r--drivers/media/video/tiler/tiler-ioctl.c745
-rw-r--r--drivers/media/video/tiler/tiler-main.c1832
-rw-r--r--drivers/media/video/tiler/tiler-reserve.c167
-rw-r--r--drivers/media/video/tiler/tiler-reserve.h11
-rw-r--r--drivers/media/video/tiler/tmm.h10
9 files changed, 1534 insertions, 1380 deletions
diff --git a/drivers/media/video/tiler/Makefile b/drivers/media/video/tiler/Makefile
index 4a4b48b73ea..c617c4cc09d 100644
--- a/drivers/media/video/tiler/Makefile
+++ b/drivers/media/video/tiler/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_TI_TILER) += tcm/
obj-$(CONFIG_TI_TILER) += tiler.o
-tiler-objs = tiler-main.o tiler-reserve.o tiler-geom.o tmm-pat.o
+tiler-objs = tiler-geom.o tiler-main.o tiler-ioctl.o tiler-reserve.o tmm-pat.o
obj-$(CONFIG_TI_TILER) += tiler_dmm.o
tiler_dmm-objs = dmm.o
diff --git a/drivers/media/video/tiler/_tiler.h b/drivers/media/video/tiler/_tiler.h
new file mode 100644
index 00000000000..474b181b99d
--- /dev/null
+++ b/drivers/media/video/tiler/_tiler.h
@@ -0,0 +1,102 @@
+#ifndef __TILER_PACK_H__
+#define __TILER_PACK_H__
+
+#include <linux/kernel.h>
+#include "tcm.h"
+
+extern const struct file_operations tiler_fops;
+
+/* per process (thread group) info */
+struct process_info {
+ struct list_head list; /* other processes */
+ struct list_head groups; /* my groups */
+ struct list_head bufs; /* my registered buffers */
+ pid_t pid; /* really: thread group ID */
+ u32 refs; /* open tiler devices, 0 for processes
+ tracked via kernel APIs */
+ bool kernel; /* tracking kernel objects */
+};
+
+/* per group info (within a process) */
+struct gid_info {
+ struct list_head by_pid; /* other groups */
+ struct list_head areas; /* all areas in this pid/gid */
+ struct list_head reserved; /* areas pre-reserved */
+ struct list_head onedim; /* all 1D areas in this pid/gid */
+ u32 gid; /* group ID */
+ int refs; /* instances directly using this ptr */
+ struct process_info *pi; /* parent */
+};
+
+struct area_info {
+ struct list_head by_gid; /* areas in this pid/gid */
+ struct list_head blocks; /* blocks in this area */
+ u32 nblocks; /* # of blocks in this area */
+
+ struct tcm_area area; /* area details */
+ struct gid_info *gi; /* link to parent, if still alive */
+};
+
+struct mem_info {
+ struct list_head global; /* reserved / global blocks */
+ struct tiler_block_t blk; /* block info */
+ u32 num_pg; /* number of pages in page-list */
+ u32 usr; /* user space address */
+ u32 *pg_ptr; /* list of mapped struct page ptrs */
+ struct tcm_area area;
+ u32 *mem; /* list of alloced phys addresses */
+ int refs; /* number of times referenced */
+ bool alloced; /* still alloced */
+
+ struct list_head by_area; /* blocks in the same area / 1D */
+ void *parent; /* area info for 2D, else group info */
+};
+
+/* tiler-main.c */
+struct mem_info *find_n_lock(u32 key, u32 id, struct gid_info *gi);
+s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info);
+s32 map_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info, u32 usr_addr);
+
+void destroy_group(struct gid_info *pi);
+
+struct mem_info *find_block_by_ssptr(u32 sys_addr);
+void fill_block_info(struct mem_info *i, struct tiler_block_info *blk);
+
+void unlock_n_free(struct mem_info *mi, bool free);
+
+s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
+ u16 *x_area, u16 *y_area, u16 *band,
+ u16 *align, u16 *offs, u16 *in_offs);
+
+struct gid_info *get_gi(struct process_info *pi, u32 gid);
+void release_gi(struct gid_info *gi);
+
+void add_reserved_blocks(struct list_head *reserved, struct gid_info *gi);
+void release_blocks(struct list_head *reserved);
+
+/* tiler-reserve.c */
+void reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
+ u32 gid, struct process_info *pi, bool can_together);
+void reserve_blocks(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 gid,
+ struct process_info *pi);
+void unreserve_blocks(struct process_info *pi, u32 gid);
+
+s32 reserve_2d(enum tiler_fmt fmt, u16 n, u16 w, u16 h, u16 band,
+ u16 align, u16 offs, struct gid_info *gi,
+ struct list_head *pos);
+s32 pack_nv12(int n, u16 w, u16 w1, u16 h, struct gid_info *gi,
+ u8 *p);
+void destroy_processes(void);
+void tiler_proc_init(void);
+/*** __get_area */
+u32 tiler_get_address(struct tiler_view_orient orient,
+ enum tiler_fmt fmt, u32 x, u32 y);
+/*** find block */
+void tiler_get_natural_xy(u32 tsptr, u32 *x, u32 *y);
+
+#endif
diff --git a/drivers/media/video/tiler/tiler-def.h b/drivers/media/video/tiler/tiler-def.h
index d931e18de15..facf497e31e 100644
--- a/drivers/media/video/tiler/tiler-def.h
+++ b/drivers/media/video/tiler/tiler-def.h
@@ -28,6 +28,8 @@
#define TILER_GET_ACC_MODE(x) ((enum tiler_fmt) (TILFMT_8BIT + \
(((u32)x & (TILER_ACC_MODE_MASK<<TILER_ACC_MODE_SHIFT))>>TILER_ACC_MODE_SHIFT)))
+#define TILER_FORMATS 4
+
#define TILER_ALIAS_BASE 0x60000000u
#define TILVIEW_8BIT TILER_ALIAS_BASE
#define TILVIEW_16BIT (TILVIEW_8BIT + 0x08000000u)
diff --git a/drivers/media/video/tiler/tiler-geom.c b/drivers/media/video/tiler/tiler-geom.c
index d85f54cf885..6dad0c974d2 100644
--- a/drivers/media/video/tiler/tiler-geom.c
+++ b/drivers/media/video/tiler/tiler-geom.c
@@ -18,6 +18,8 @@
#include <linux/module.h>
#include <mach/tiler.h>
#include "tiler-def.h"
+#include <mach/dmm.h> /* TEMP */
+#include "tcm.h"
#define DMM_SHIFT_PER_X_8 0
#define DMM_SHIFT_PER_Y_8 0
@@ -63,6 +65,45 @@
#define DMM_TILER_STRIDE_90_(N) \
(DMM_TILER_THE(CONT_HEIGHT) << DMM_SHIFT_PER_X_##N)
/*
 * TILER Memory model query method
 */
/* true iff addr lies in the tiler alias range [TILVIEW_8BIT, TILVIEW_END) */
bool is_tiler_addr(u32 addr)
{
        return addr >= TILVIEW_8BIT && addr < TILVIEW_END;
}
+
+/*
+ * TILER block query method
+ */
+
+static const u32 tiler_bpps[TILER_FORMATS] = { 1, 2, 4, 1 };
+static const u32 tiler_strides[TILER_FORMATS] = { 16384, 32768, 32768, 0 };
+
+u32 tiler_bpp(const struct tiler_block_t *b)
+{
+ enum tiler_fmt fmt = tiler_fmt(b);
+ BUG_ON(fmt == TILFMT_INVALID);
+
+ return tiler_bpps[fmt - TILFMT_8BIT];
+}
+
/* physical (container) stride of a block */
u32 tiler_pstride(const struct tiler_block_t *b)
{
        enum tiler_fmt fmt = tiler_fmt(b);
        BUG_ON(fmt == TILFMT_INVALID);

        /* PAGE-mode entries are 0 in tiler_strides: 1D blocks have no
           fixed container stride, so fall back to the virtual stride
           (`?:` is the GNU "elvis" operator) */
        return tiler_strides[fmt - TILFMT_8BIT] ? : tiler_vstride(b);
}
+
+enum tiler_fmt tiler_fmt(const struct tiler_block_t *b)
+{
+ if (!is_tiler_addr(b->phys))
+ return TILFMT_INVALID;
+
+ return TILER_GET_ACC_MODE(b->phys);
+}
+
void tiler_get_natural_xy(u32 tsptr, u32 *x, u32 *y)
{
u32 x_bits, y_bits, offset;
@@ -155,7 +196,7 @@ u32 tiler_get_address(struct tiler_view_orient orient,
return (u32)
TIL_ADDR((tmp << alignment), (orient.rotate_90 ? 1 : 0),
(orient.y_invert ? 1 : 0), (orient.x_invert ? 1 : 0),
- (fmt - 1));
+ fmt);
}
u32 tiler_reorient_addr(u32 tsptr, struct tiler_view_orient orient)
diff --git a/drivers/media/video/tiler/tiler-ioctl.c b/drivers/media/video/tiler/tiler-ioctl.c
new file mode 100644
index 00000000000..dd82e12494a
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-ioctl.c
@@ -0,0 +1,745 @@
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/fs.h> /* fops */
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/slab.h> /* kmalloc */
+#include <linux/sched.h> /* current */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <asm/mach/map.h> /* for ioremap_page */
+
+#include <mach/tiler.h>
+#include <mach/dmm.h> /* TEMP */
+#include "_tiler.h"
+
+static bool security = CONFIG_TILER_SECURITY;
+static bool ssptr_lookup = true;
+static bool offset_lookup = true;
+
+module_param(security, bool, 0644);
+MODULE_PARM_DESC(security,
+ "Separate allocations by different processes into different pages");
+module_param(ssptr_lookup, bool, 0644);
+MODULE_PARM_DESC(ssptr_lookup,
+ "Allow looking up a block by ssptr - This is a security risk");
+module_param(offset_lookup, bool, 0644);
+MODULE_PARM_DESC(offset_lookup,
+ "Allow looking up a buffer by offset - This is a security risk");
+
+static struct mutex mtx;
+static struct list_head procs;
+
+/*
+ * Buffer handling methods
+ * ==========================================================================
+ */
+
+struct __buf_info {
+ struct list_head by_pid; /* list of buffers per pid */
+ struct tiler_buf_info buf_info;
+ struct mem_info *mi[TILER_MAX_NUM_BLOCKS]; /* blocks */
+};
+
+/* check if an offset is used */
+/* must have mutex */
+/*** register_buf */
+static bool _m_offs_in_use(u32 offs, u32 length, struct process_info *pi)
+{
+ struct __buf_info *_b;
+ list_for_each_entry(_b, &pi->bufs, by_pid)
+ if (_b->buf_info.offset < offs + length &&
+ _b->buf_info.offset + _b->buf_info.length > offs)
+ return 1;
+ return 0;
+}
+
/* get a unique mmap offset for a buffer of the given length */
/* must have mutex */
/*** register_buf */
static u32 _m_get_offs(struct process_info *pi, u32 length)
{
        /* persists across calls: each call resumes walking the LFSR
           sequence from where the last one stopped */
        static u32 offs = 0xda7a;

        /* ensure no-one is using this offset */
        /* first condition also rejects page-shifted offsets that would
           overflow u32 when length is added */
        while ((offs << PAGE_SHIFT) + length < length ||
               _m_offs_in_use(offs << PAGE_SHIFT, length, pi)) {
                /* Galois LFSR, taps at bits 20, 17 (0x90000 mask) */
                offs = (offs >> 1) ^ (u32)((0 - (offs & 1u)) & 0x90000);
        }

        return offs << PAGE_SHIFT;
}
+
+/* find and lock a block. process_info is optional */
+/* must have mutex */
+static struct mem_info *
+_m_lock_block(u32 key, u32 id, struct process_info *pi) {
+ struct gid_info *gi = NULL;
+ struct mem_info *mi = NULL;
+
+ /* if process_info is given, look there first */
+ if (pi) {
+ /* find block in process list and free it */
+ list_for_each_entry(gi, &pi->groups, by_pid) {
+ mi = find_n_lock(key, id, gi);
+ if (mi)
+ return mi;
+ }
+ }
+
+ /* if not found or no process_info given, find block in global list */
+ return find_n_lock(key, id, NULL);
+}
+
/* register a buffer: lock all of its blocks and assign an mmap offset */
/* must have mutex */
static s32 _m_register_buf(struct __buf_info *_b, struct process_info *pi)
{
        struct mem_info *mi = NULL;
        struct tiler_buf_info *b = &_b->buf_info;
        u32 i, num = b->num_blocks;

        /* check validity */
        if (num > TILER_MAX_NUM_BLOCKS)
                return -EINVAL;

        /* find each block; total buffer length is the sum of block sizes */
        b->length = 0;
        for (i = 0; i < num; i++) {
                mi = _m_lock_block(b->blocks[i].key, b->blocks[i].id, pi);
                if (!mi) {
                        /* unlock any blocks already found (unwind) */
                        while (i--)
                                unlock_n_free(_b->mi[i], false);
                        return -EACCES;
                }
                _b->mi[i] = mi;
                b->length += tiler_size(&mi->blk);
        }

        /* if found all, register buffer: hand out a unique mmap offset
           and track the buffer on the per-process list */
        b->offset = _m_get_offs(pi, b->length);

        list_add(&_b->by_pid, &pi->bufs);

        return 0;
}
+
+/* unregister a buffer */
+/* must have mutex */
+static void _m_unregister_buf(struct __buf_info *_b)
+{
+ u32 i;
+
+ /* unregister */
+ list_del(&_b->by_pid);
+
+ /* no longer using the blocks */
+ for (i = 0; i < _b->buf_info.num_blocks; i++)
+ unlock_n_free(_b->mi[i], false);
+
+ kfree(_b);
+}
+
+/*
+ * process_info handling methods
+ * ==========================================================================
+ */
+
+
+/* get process info, and increment refs for device tracking */
+static struct process_info *__get_pi(pid_t pid, bool kernel)
+{
+ struct process_info *pi;
+
+ /* treat all processes as the same, kernel processes are still treated
+ differently so not to free kernel allocated areas when a user process
+ closes the tiler driver */
+ if (!security)
+ pid = 0;
+
+ /* find process context */
+ mutex_lock(&mtx);
+ list_for_each_entry(pi, &procs, list) {
+ if (pi->pid == pid && pi->kernel == kernel)
+ goto done;
+ }
+
+ /* create process context */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ goto done;
+
+ memset(pi, 0, sizeof(*pi));
+ pi->pid = pid;
+ pi->kernel = kernel;
+ INIT_LIST_HEAD(&pi->groups);
+ INIT_LIST_HEAD(&pi->bufs);
+ list_add(&pi->list, &procs);
+done:
+ if (pi && !kernel)
+ pi->refs++;
+ mutex_unlock(&mtx);
+ return pi;
+}
+
/**
 * Free all info kept by a process:
 *
 * all registered buffers, allocated blocks, and unreferenced
 * blocks. Any blocks/areas still referenced will move to the
 * orphaned lists to avoid issues if a new process is created
 * with the same pid.
 *
 * (must have mutex)
 *
 * NOTE(review): not declared in _tiler.h and only called from this
 * file -- presumably should be static; confirm no other TU links it.
 */
void _m_free_process_info(struct process_info *pi)
{
        struct gid_info *gi, *gi_;
        struct __buf_info *_b = NULL, *_b_ = NULL;

        /* unregister all buffers (safe iteration: entries are freed) */
        list_for_each_entry_safe(_b, _b_, &pi->bufs, by_pid)
                _m_unregister_buf(_b);

        BUG_ON(!list_empty(&pi->bufs));

        /* free all allocated blocks, and remove unreferenced ones */
        list_for_each_entry_safe(gi, gi_, &pi->groups, by_pid) {
                destroy_group(gi);
        }

        BUG_ON(!list_empty(&pi->groups));
        list_del(&pi->list);
        kfree(pi);
}
+
+void destroy_processes(void)
+{
+ struct process_info *pi, *pi_;
+
+ mutex_lock(&mtx);
+
+ list_for_each_entry_safe(pi, pi_, &procs, list)
+ _m_free_process_info(pi);
+ BUG_ON(!list_empty(&procs));
+
+ mutex_unlock(&mtx);
+}
+
+void tiler_proc_init(void)
+{
+#ifdef CONFIG_TILER_SECURE
+ security = true;
+ offset_lookup = ssptr_lookup = false;
+#endif
+
+ mutex_init(&mtx);
+ INIT_LIST_HEAD(&procs);
+}
+
+/*
+ * File operations (mmap, ioctl, open, close)
+ * ==========================================================================
+ */
+
/* mmap handler: map the blocks of a previously registered buffer that
   fall inside the requested [offs, offs + size) window */
static s32 tiler_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct __buf_info *_b = NULL;
        struct tiler_buf_info *b = NULL;
        u32 i, map_offs, map_size, blk_offs, blk_size, mapped_size;
        struct process_info *pi = filp->private_data;
        u32 offs = vma->vm_pgoff << PAGE_SHIFT;
        u32 size = vma->vm_end - vma->vm_start;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* the requested window must lie wholly inside one registered buffer */
        mutex_lock(&mtx);
        list_for_each_entry(_b, &pi->bufs, by_pid) {
                if (offs >= _b->buf_info.offset &&
                        offs + size <= _b->buf_info.offset + _b->buf_info.length) {
                        b = &_b->buf_info;
                        break;
                }
        }
        mutex_unlock(&mtx);
        /* NOTE(review): _b is dereferenced below after the mutex is
           dropped -- presumably safe only if unregister cannot race with
           mmap; confirm */
        if (!b)
                return -ENXIO;

        /* mmap relevant blocks */
        blk_offs = _b->buf_info.offset;
        mapped_size = 0;
        for (i = 0; i < b->num_blocks; i++, blk_offs += blk_size) {
                blk_size = tiler_size(&_b->mi[i]->blk);
                /* skip blocks entirely outside the requested window */
                if (offs >= blk_offs + blk_size || offs + size < blk_offs)
                        continue;
                map_offs = max(offs, blk_offs) - blk_offs;
                /* NOTE(review): map_size ignores map_offs, so a window
                   starting mid-block may over-count -- TODO confirm.
                   A failure here also leaves earlier blocks mapped. */
                map_size = min(size - mapped_size, blk_size);
                if (tiler_mmap_blk(&_b->mi[i]->blk, map_offs, map_size, vma,
                                        mapped_size))
                        return -EAGAIN;
                mapped_size += map_size;
        }
        return 0;
}
+
+static s32 tiler_ioctl(struct inode *ip, struct file *filp, u32 cmd,
+ unsigned long arg)
+{
+ pgd_t *pgd = NULL;
+ pmd_t *pmd = NULL;
+ pte_t *ptep = NULL, pte = 0x0;
+ s32 r = -1;
+ struct process_info *pi = filp->private_data;
+
+ struct __buf_info *_b = NULL;
+ struct tiler_buf_info buf_info = {0};
+ struct tiler_block_info block_info = {0};
+ struct mem_info *mi;
+
+ switch (cmd) {
+ case TILIOC_GBLK:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(block_info)))
+ return -EFAULT;
+
+ switch (block_info.fmt) {
+ case TILFMT_PAGE:
+ r = alloc_block(block_info.fmt, block_info.dim.len, 1,
+ block_info.align, block_info.offs,
+ block_info.key, block_info.group_id,
+ pi, &mi);
+ if (r)
+ return r;
+ break;
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ r = alloc_block(block_info.fmt,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.align, block_info.offs,
+ block_info.key, block_info.group_id,
+ pi, &mi);
+ if (r)
+ return r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mi) {
+ block_info.id = mi->blk.id;
+ block_info.stride = tiler_vstride(&mi->blk);
+#ifdef CONFIG_TILER_EXPOSE_SSPTR
+ block_info.ssptr = mi->blk.phys;
+#endif
+ }
+ if (copy_to_user((void __user *)arg, &block_info,
+ sizeof(block_info)))
+ return -EFAULT;
+ break;
+ case TILIOC_FBLK:
+ case TILIOC_UMBLK:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(block_info)))
+ return -EFAULT;
+
+ /* search current process first, then all processes */
+ mutex_lock(&mtx);
+ mi = _m_lock_block(block_info.key, block_info.id, pi);
+ mutex_unlock(&mtx);
+ if (mi) {
+ unlock_n_free(mi, true);
+ r = 0;
+ }
+ r = -EACCES;
+
+ /* free always succeeds */
+ break;
+
+ case TILIOC_GSSP:
+ pgd = pgd_offset(current->mm, arg);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, arg);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, arg);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return (pte & PAGE_MASK) |
+ (~PAGE_MASK & arg);
+ }
+ }
+ }
+ /* va not in page table */
+ return 0x0;
+ break;
+ case TILIOC_MBLK:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(block_info)))
+ return -EFAULT;
+
+ if (!block_info.ptr)
+ return -EFAULT;
+
+ r = map_block(block_info.fmt, block_info.dim.len, 1,
+ block_info.key, block_info.group_id, pi,
+ &mi, (u32)block_info.ptr);
+ if (r)
+ return r;
+
+ if (mi) {
+ block_info.id = mi->blk.id;
+ block_info.stride = tiler_vstride(&mi->blk);
+#ifdef CONFIG_TILER_EXPOSE_SSPTR
+ block_info.ssptr = mi->blk.phys;
+#endif
+ }
+ if (copy_to_user((void __user *)arg, &block_info,
+ sizeof(block_info)))
+ return -EFAULT;
+ break;
+#ifndef CONFIG_TILER_SECURE
+ case TILIOC_QBUF:
+ if (!offset_lookup)
+ return -EPERM;
+
+ if (copy_from_user(&buf_info, (void __user *)arg,
+ sizeof(buf_info)))
+ return -EFAULT;
+
+ mutex_lock(&mtx);
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+ if (buf_info.offset == _b->buf_info.offset) {
+ if (copy_to_user((void __user *)arg,
+ &_b->buf_info,
+ sizeof(_b->buf_info))) {
+ mutex_unlock(&mtx);
+ return -EFAULT;
+ } else {
+ mutex_unlock(&mtx);
+ return 0;
+ }
+ }
+ }
+ mutex_unlock(&mtx);
+ return -EFAULT;
+ break;
+#endif
+ case TILIOC_RBUF:
+ _b = kmalloc(sizeof(*_b), GFP_KERNEL);
+ if (!_b)
+ return -ENOMEM;
+
+ memset(_b, 0x0, sizeof(*_b));
+
+ if (copy_from_user(&_b->buf_info, (void __user *)arg,
+ sizeof(_b->buf_info))) {
+ kfree(_b); return -EFAULT;
+ }
+
+ mutex_lock(&mtx);
+ r = _m_register_buf(_b, pi);
+ mutex_unlock(&mtx);
+ if (r) {
+ kfree(_b); return -EACCES;
+ }
+
+ if (copy_to_user((void __user *)arg, &_b->buf_info,
+ sizeof(_b->buf_info))) {
+ _m_unregister_buf(_b);
+ return -EFAULT;
+ }
+ break;
+ case TILIOC_URBUF:
+ if (copy_from_user(&buf_info, (void __user *)arg,
+ sizeof(buf_info)))
+ return -EFAULT;
+
+ mutex_lock(&mtx);
+ /* buffer registration is per process */
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+ if (buf_info.offset == _b->buf_info.offset) {
+ _m_unregister_buf(_b);
+ mutex_unlock(&mtx);
+ return 0;
+ }
+ }
+ mutex_unlock(&mtx);
+ return -EFAULT;
+ break;
+ case TILIOC_PRBLK:
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(block_info)))
+ return -EFAULT;
+
+ if (block_info.fmt == TILFMT_8AND16) {
+ reserve_nv12(block_info.key,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.align,
+ block_info.offs,
+ block_info.group_id, pi,
+ true /* :TODO: get can_together */);
+ } else {
+ reserve_blocks(block_info.key,
+ block_info.fmt,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.align,
+ block_info.offs,
+ block_info.group_id, pi);
+ }
+ break;
+ case TILIOC_URBLK:
+ unreserve_blocks(pi, arg);
+ break;
+#ifndef CONFIG_TILER_SECURE
+ case TILIOC_QBLK:
+ if (!ssptr_lookup)
+ return -EPERM;
+
+ if (copy_from_user(&block_info, (void __user *)arg,
+ sizeof(block_info)))
+ return -EFAULT;
+
+ mi = find_block_by_ssptr(block_info.ssptr);
+ if (mi)
+ fill_block_info(mi, &block_info);
+ else
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)arg, &block_info,
+ sizeof(block_info)))
+ return -EFAULT;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+ return 0x0;
+}
+
+static s32 tiler_open(struct inode *ip, struct file *filp)
+{
+ struct process_info *pi = __get_pi(current->tgid, false);
+ if (!pi)
+ return -ENOMEM;
+
+ filp->private_data = pi;
+ return 0x0;
+}
+
+static s32 tiler_release(struct inode *ip, struct file *filp)
+{
+ struct process_info *pi = filp->private_data;
+
+ mutex_lock(&mtx);
+ /* free resources if last device in this process */
+ if (0 == --pi->refs)
+ _m_free_process_info(pi);
+
+ mutex_unlock(&mtx);
+
+ return 0x0;
+}
+
/* tiler character-device operations: mmap + (legacy) ioctl only */
const struct file_operations tiler_fops = {
        .open = tiler_open,
        .ioctl = tiler_ioctl,
        .release = tiler_release,
        .mmap = tiler_mmap,
};
+
+/*
+ * Kernel APIs
+ * ==========================================================================
+ */
+
+s32 tiler_reservex(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 gid, pid_t pid)
+{
+ struct process_info *pi = __get_pi(pid, true);
+
+ if (pi)
+ reserve_blocks(n, fmt, width, height, align, offs, gid, pi);
+ return 0;
+}
+EXPORT_SYMBOL(tiler_reservex);
+
+s32 tiler_reserve(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs)
+{
+ return tiler_reservex(n, fmt, width, height, align, offs,
+ 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_reserve);
+
+/* reserve area for n identical buffers */
+s32 tiler_reservex_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
+ u32 gid, pid_t pid)
+{
+ struct process_info *pi = __get_pi(pid, true);
+
+ if (pi)
+ reserve_nv12(n, width, height, align, offs, gid, pi,
+ true /* :TODO: get can_together */);
+ return 0;
+}
+EXPORT_SYMBOL(tiler_reservex_nv12);
+
+s32 tiler_reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs)
+{
+ return tiler_reservex_nv12(n, width, height, align, offs,
+ 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_reserve_nv12);
+
+s32 tiler_allocx(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 align, u32 offs, u32 gid, pid_t pid)
+{
+ struct mem_info *mi;
+ struct process_info *pi;
+ s32 res;
+
+ if (!blk || blk->phys)
+ BUG();
+
+ pi = __get_pi(pid, true);
+ if (!pi)
+ return -ENOMEM;
+
+ res = alloc_block(fmt, blk->width, blk->height, align, offs, blk->key,
+ gid, pi, &mi);
+ if (mi)
+ blk->phys = mi->blk.phys;
+ return res;
+}
+EXPORT_SYMBOL(tiler_allocx);
+
+s32 tiler_alloc(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 align, u32 offs)
+{
+ return tiler_allocx(blk, fmt, align, offs, 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_alloc);
+
+s32 tiler_mapx(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 gid,
+ pid_t pid, u32 usr_addr)
+{
+ struct mem_info *mi;
+ struct process_info *pi;
+ s32 res;
+
+ if (!blk || blk->phys)
+ BUG();
+
+ pi = __get_pi(pid, true);
+ if (!pi)
+ return -ENOMEM;
+
+ res = map_block(fmt, blk->width, blk->height, blk->key, gid, pi, &mi,
+ usr_addr);
+ if (mi)
+ blk->phys = mi->blk.phys;
+ return res;
+
+}
+EXPORT_SYMBOL(tiler_mapx);
+
+s32 tiler_map(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 usr_addr)
+{
+ return tiler_mapx(blk, fmt, 0, current->tgid, usr_addr);
+}
+EXPORT_SYMBOL(tiler_map);
+
/* map [offs, offs + size) of a tiler block into user space at
   vma->vm_start + voffs, walking the block one virtual-stride row at a
   time (physical stride p may exceed virtual stride v) */
s32 tiler_mmap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
                   struct vm_area_struct *vma, u32 voffs)
{
        u32 v, p;
        u32 len;                /* area to map */

        /* don't allow mremap */
        vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;

        /* mapping must fit into vma (each compare also catches overflow) */
        if (vma->vm_start > vma->vm_start + voffs ||
            vma->vm_start + voffs > vma->vm_start + voffs + size ||
            vma->vm_start + voffs + size > vma->vm_end)
                BUG();

        /* mapping must fit into block */
        if (offs > offs + size ||
            offs + size > tiler_size(blk))
                BUG();

        v = tiler_vstride(blk);
        p = tiler_pstride(blk);

        /* remap block portion */
        len = v - (offs % v);   /* initial area to map (rest of first row) */
        while (size) {
                if (len > size)
                        len = size;

                /* NOTE(review): vm_pgoff is repurposed as a scratch for
                   the physical pfn on every iteration -- confirm nothing
                   reads it back after mmap */
                vma->vm_pgoff = (blk->phys + offs) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start + voffs, vma->vm_pgoff,
                                        len, vma->vm_page_prot))
                        return -EAGAIN;
                voffs += len;
                /* advance by the physical row gap (p - v) as well */
                offs += len + p - v;
                size -= len;
                len = v;        /* subsequent area to map (full rows) */
        }
        return 0;
}
EXPORT_SYMBOL(tiler_mmap_blk);
+
/* ioremap [offs, offs + size) of a tiler block to kernel virtual address
   addr, page by page, honoring the block's physical stride */
s32 tiler_ioremap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
                      u32 addr, u32 mtype)
{
        u32 v, p;
        u32 len;                /* area to map */
        const struct mem_type *type = get_mem_type(mtype);

        /* mapping must fit into address space (overflow check) */
        if (addr > addr + size)
                BUG();

        /* mapping must fit into block */
        if (offs > offs + size ||
            offs + size > tiler_size(blk))
                BUG();

        v = tiler_vstride(blk);
        p = tiler_pstride(blk);

        /* move offset and address to end: offs becomes the physical end
           address of the mapping, and the loop below indexes backwards
           from the end via (offs - size) / (addr - size) */
        offs += blk->phys + size;
        addr += size;

        len = v - (offs % v);   /* initial area to map */
        while (size) {
                /* NOTE(review): assumes len and size are PAGE_SIZE
                   multiples; otherwise these counters wrap -- confirm
                   callers guarantee page alignment */
                while (len && size) {
                        if (ioremap_page(addr - size, offs - size, type))
                                return -EAGAIN;
                        len -= PAGE_SIZE;
                        size -= PAGE_SIZE;
                }

                /* skip the physical row gap between virtual rows */
                offs += p - v;
                len = v;        /* subsequent area to map */
        }
        return 0;
}
EXPORT_SYMBOL(tiler_ioremap_blk);
+
+void tiler_free(struct tiler_block_t *blk)
+{
+ /* find block */
+ struct mem_info *mi = find_n_lock(blk->key, blk->id, NULL);
+ if (mi)
+ unlock_n_free(mi, true);
+}
+EXPORT_SYMBOL(tiler_free);
diff --git a/drivers/media/video/tiler/tiler-main.c b/drivers/media/video/tiler/tiler-main.c
index d964b15baad..05e675b464f 100644
--- a/drivers/media/video/tiler/tiler-main.c
+++ b/drivers/media/video/tiler/tiler-main.c
@@ -22,30 +22,24 @@
#include <linux/device.h> /* struct class */
#include <linux/platform_device.h> /* platform_device() */
#include <linux/err.h> /* IS_ERR() */
-#include <linux/uaccess.h> /* copy_to_user */
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
+/* #include <linux/sched.h> */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h> /* page_cache_release() */
#include <linux/slab.h>
-#include <asm/mach/map.h> /* for ioremap_page */
#include <mach/tiler.h>
#include <mach/dmm.h>
#include "tmm.h"
+#include "tcm.h"
#include "tiler-def.h"
#include "tcm/tcm-sita.h" /* Algo Specific header */
-#include "tiler-reserve.h"
+#include "_tiler.h"
#include <linux/syscalls.h>
-static bool security = CONFIG_TILER_SECURITY;
static bool ssptr_id = CONFIG_TILER_SSPTR_ID;
-static bool ssptr_lookup = true;
-static bool offset_lookup = true;
static uint default_align = CONFIG_TILER_ALIGNMENT;
static uint granularity = CONFIG_TILER_GRANULARITY;
@@ -55,15 +49,6 @@ module_param_named(align, default_align, uint, 0644);
MODULE_PARM_DESC(align, "Default block ssptr alignment");
module_param_named(grain, granularity, uint, 0644);
MODULE_PARM_DESC(grain, "Granularity (bytes)");
-module_param(ssptr_lookup, bool, 0644);
-MODULE_PARM_DESC(ssptr_lookup,
- "Allow looking up a block by ssptr - This is a security risk");
-module_param(offset_lookup, bool, 0644);
-MODULE_PARM_DESC(offset_lookup,
- "Allow looking up a buffer by offset - This is a security risk");
-module_param(security, bool, 0644);
-MODULE_PARM_DESC(security,
- "Separate allocations by different processes into different pages");
struct tiler_dev {
struct cdev cdev;
@@ -79,64 +64,9 @@ struct platform_driver tiler_driver_ldm = {
.remove = NULL,
};
-/* per process (thread group) info */
-struct process_info {
- struct list_head list; /* other processes */
- struct list_head groups; /* my groups */
- struct list_head bufs; /* my registered buffers */
- pid_t pid; /* really: thread group ID */
- u32 refs; /* open tiler devices, 0 for processes
- tracked via kernel APIs */
- bool kernel; /* tracking kernel objects */
-};
-
-/* per group info (within a process) */
-struct gid_info {
- struct list_head by_pid; /* other groups */
- struct list_head areas; /* all areas in this pid/gid */
- struct list_head reserved; /* areas pre-reserved */
- struct list_head onedim; /* all 1D areas in this pid/gid */
- u32 gid; /* group ID */
- u32 refs; /* instances directly using this ptr */
- struct process_info *pi; /* parent */
-};
-
-struct list_head blocks;
-struct list_head procs;
-struct list_head orphan_areas;
-struct list_head orphan_onedim;
-
-struct area_info {
- struct list_head by_gid; /* areas in this pid/gid */
- struct list_head blocks; /* blocks in this area */
- u32 nblocks; /* # of blocks in this area */
-
- struct tcm_area area; /* area details */
- struct gid_info *gi; /* link to parent, if still alive */
-};
-
-struct mem_info {
- struct list_head global; /* reserved / global blocks */
- struct tiler_block_t blk; /* block info */
- u32 num_pg; /* number of pages in page-list */
- u32 usr; /* user space address */
- u32 *pg_ptr; /* list of mapped struct page ptrs */
- struct tcm_area area;
- u32 *mem; /* list of alloced phys addresses */
- u32 refs; /* number of times referenced */
- bool alloced; /* still alloced */
-
- struct list_head by_area; /* blocks in the same area / 1D */
- void *parent; /* area info for 2D, else group info */
-};
-
-struct __buf_info {
- struct list_head by_pid; /* list of buffers per pid */
- struct tiler_buf_info buf_info;
- struct mem_info *mi[TILER_MAX_NUM_BLOCKS]; /* blocks */
-};
-
-#define TILER_FORMATS 4
+static struct list_head blocks;
+static struct list_head orphan_areas;
+static struct list_head orphan_onedim;
static s32 tiler_major;
static s32 tiler_minor;
@@ -153,83 +83,169 @@ static struct tmm *tmm[TILER_FORMATS];
#define TMM_SS(ssptr) TMM(TILER_GET_ACC_MODE(ssptr))
#define TMM_SET(fmt, i) tmm[(fmt) - TILFMT_8BIT] = i
-static const u32 tiler_bpps[4] = { 1, 2, 4, 1 };
-static const u32 tiler_strides[4] = { 16384, 32768, 32768, 0 };
-
-static u32 is_tiler_addr(u32 addr)
+/*
+ * TMM connectors
+ * ==========================================================================
+ */
+static s32 refill_pat(struct tmm *tmm, struct tcm_area *area, u32 *ptr)
{
- return addr >= TILVIEW_8BIT && addr < TILVIEW_END;
-}
+ s32 res = 0;
+ s32 size = tcm_sizeof(*area) * sizeof(*ptr);
+ u32 *page;
+ dma_addr_t page_pa;
+ struct pat_area p_area = {0};
+ struct tcm_area slice, area_s;
-/* get tiler format */
-static inline u32 tiler_fmt(const struct tiler_block_t *b)
-{
- if (!is_tiler_addr(b->phys))
- BUG();
- /* return TILER_GET_ACC_MODE(b->phys); */
- return TILFMT_8BIT + (((b->phys - TILVIEW_8BIT) >> 27) & 3);
+ /* must be a 16-byte aligned physical address */
+ page = dma_alloc_coherent(NULL, size, &page_pa, GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ p_area.x0 = slice.p0.x;
+ p_area.y0 = slice.p0.y;
+ p_area.x1 = slice.p1.x;
+ p_area.y1 = slice.p1.y;
+
+ memcpy(page, ptr, sizeof(*ptr) * tcm_sizeof(slice));
+ ptr += tcm_sizeof(slice);
+
+ if (tmm_map(tmm, p_area, page_pa)) {
+ res = -EFAULT;
+ break;
+ }
+ }
+
+ dma_free_coherent(NULL, size, page, page_pa);
+
+ return res;
}
-/* get tiler block bpp */
-static inline u32 tiler_bpp(const struct tiler_block_t *b)
+static void clear_pat(struct tmm *tmm, struct tcm_area *area)
{
- return tiler_bpps[tiler_fmt(b) - TILFMT_8BIT];
+ struct pat_area p_area = {0};
+ struct tcm_area slice, area_s;
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ p_area.x0 = slice.p0.x;
+ p_area.y0 = slice.p0.y;
+ p_area.x1 = slice.p1.x;
+ p_area.y1 = slice.p1.y;
+
+ tmm_clear(tmm, p_area);
+ }
}
-/* get tiler block virtual stride */
-static inline u32 tiler_vstride(const struct tiler_block_t *b)
+/*
+ * ID handling methods
+ * ==========================================================================
+ */
+
+/* check if an id is used */
+static bool _m_id_in_use(u32 id)
{
- return PAGE_ALIGN((b->phys & ~PAGE_MASK) + tiler_bpp(b) * b->width);
+ struct mem_info *mi;
+ list_for_each_entry(mi, &blocks, global)
+ if (mi->blk.id == id)
+ return 1;
+ return 0;
}
-/* get tiler block physical stride */
-static inline u32 tiler_pstride(const struct tiler_block_t *b)
+/* get an id */
+static u32 _m_get_id(void)
{
- return tiler_strides[tiler_fmt(b) - TILFMT_8BIT] ? : tiler_vstride(b);
+ static u32 id = 0x2d7ae;
+
+	/* ensure no one is using this id */
+ while (_m_id_in_use(id)) {
+ /* Galois LSFR: 32, 22, 2, 1 */
+ id = (id >> 1) ^ (u32)((0 - (id & 1u)) & 0x80200003u);
+ }
+
+ return id;
}
-/* returns the virtual size of the block (for mmap) */
-static inline u32 tiler_size(const struct tiler_block_t *b)
+/*
+ * gid_info handling methods
+ * ==========================================================================
+ */
+
+/* must have mutex */
+static struct gid_info *_m_get_gi(struct process_info *pi, u32 gid)
{
- return b->height * tiler_vstride(b);
+ struct gid_info *gi;
+
+	/* see if group already exists */
+ list_for_each_entry(gi, &pi->groups, by_pid) {
+ if (gi->gid == gid)
+ goto done;
+ }
+
+ /* create new group */
+ gi = kmalloc(sizeof(*gi), GFP_KERNEL);
+ if (!gi)
+ return gi;
+
+ memset(gi, 0, sizeof(*gi));
+ INIT_LIST_HEAD(&gi->areas);
+ INIT_LIST_HEAD(&gi->onedim);
+ INIT_LIST_HEAD(&gi->reserved);
+ gi->pi = pi;
+ gi->gid = gid;
+ list_add(&gi->by_pid, &pi->groups);
+done:
+ /*
+ * Once area is allocated, the group info's ref count will be
+ * decremented as the reference is no longer needed.
+ */
+ gi->refs++;
+ return gi;
}
-/* get process info, and increment refs for device tracking */
-static struct process_info *__get_pi(pid_t pid, bool kernel)
+/* (must have mutex) */
+static void _m_try_free_group(struct gid_info *gi)
{
- struct process_info *pi;
+ if (gi && list_empty(&gi->areas) && list_empty(&gi->onedim) &&
+	    /* also ensure no one is still using this group */
+ !gi->refs) {
+ BUG_ON(!list_empty(&gi->reserved));
+ list_del(&gi->by_pid);
- /* treat all processes as the same, kernel processes are still treated
- differently so not to free kernel allocated areas when a user process
- closes the tiler driver */
- if (!security)
- pid = 0;
+ /* if group is tracking kernel objects, we may free even
+ the process info */
+ if (gi->pi->kernel && list_empty(&gi->pi->groups)) {
+ list_del(&gi->pi->list);
+ kfree(gi->pi);
+ }
- /* find process context */
- mutex_lock(&mtx);
- list_for_each_entry(pi, &procs, list) {
- if (pi->pid == pid && pi->kernel == kernel)
- goto done;
+ kfree(gi);
}
+}
- /* create process context */
- pi = kmalloc(sizeof(*pi), GFP_KERNEL);
- if (!pi)
- goto done;
+/* --- external versions --- */
- memset(pi, 0, sizeof(*pi));
- pi->pid = pid;
- pi->kernel = kernel;
- INIT_LIST_HEAD(&pi->groups);
- INIT_LIST_HEAD(&pi->bufs);
- list_add(&pi->list, &procs);
-done:
- if (pi && !kernel)
- pi->refs++;
+struct gid_info *get_gi(struct process_info *pi, u32 gid)
+{
+ struct gid_info *gi;
+ mutex_lock(&mtx);
+ gi = _m_get_gi(pi, gid);
+ mutex_unlock(&mtx);
+ return gi;
+}
+
+void release_gi(struct gid_info *gi)
+{
+ mutex_lock(&mtx);
+ gi->refs--;
+ _m_try_free_group(gi);
mutex_unlock(&mtx);
- return pi;
}
+/*
+ * Area handling methods
+ * ==========================================================================
+ */
+
/* allocate an reserved area of size, alignment and link it to gi */
/* leaves mutex locked to be able to add block to area */
static struct area_info *area_new_m(u16 width, u16 height, u16 align,
@@ -255,7 +271,7 @@ static struct area_info *area_new_m(u16 width, u16 height, u16 align,
return ai;
}
-/* (must have mutex) free an area and return NULL */
+/* (must have mutex) free an area */
static inline void _m_area_free(struct area_info *ai)
{
if (ai) {
@@ -264,7 +280,7 @@ static inline void _m_area_free(struct area_info *ai)
}
}
-static s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
+s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
u16 *x_area, u16 *y_area, u16 *band,
u16 *align, u16 *offs, u16 *in_offs)
{
@@ -282,14 +298,17 @@ static s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
case TILFMT_8BIT:
slot_w = DMM_PAGE_DIMM_X_MODE_8;
slot_h = DMM_PAGE_DIMM_Y_MODE_8;
+ bpp = 1;
break;
case TILFMT_16BIT:
slot_w = DMM_PAGE_DIMM_X_MODE_16;
slot_h = DMM_PAGE_DIMM_Y_MODE_16;
+ bpp = 2;
break;
case TILFMT_32BIT:
slot_w = DMM_PAGE_DIMM_X_MODE_32;
slot_h = DMM_PAGE_DIMM_Y_MODE_32;
+ bpp = 4;
break;
case TILFMT_PAGE:
/* adjust size to accomodate offset, only do page alignment */
@@ -308,7 +327,6 @@ static s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
}
/* get the # of bytes per row in 1 slot */
- bpp = tiler_bpps[fmt - TILFMT_8BIT];
slot_row = slot_w * bpp;
/* how many slots are can be accessed via one physical page */
@@ -319,7 +337,7 @@ static s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
*align = ALIGN(*align ? : default_align, min_align);
/* offset must be multiple of bpp */
- if (*offs & (bpp - 1))
+ if (*offs & (bpp - 1) || *offs >= *align)
return -EINVAL;
/* round down the offset to the nearest slot size, and increase width
@@ -398,7 +416,8 @@ struct mem_info *_m_add2area(struct mem_info *mi, struct area_info *ai,
}
static struct mem_info *get_2d_area(u16 w, u16 h, u16 align, u16 offs, u16 band,
- struct gid_info *gi, struct tcm *tcm) {
+ struct gid_info *gi, struct tcm *tcm)
+{
struct area_info *ai = NULL;
struct mem_info *mi = NULL;
struct list_head *before = NULL;
@@ -459,39 +478,88 @@ done:
return mi;
}
-/* (must have mutex) */
-static void _m_try_free_group(struct gid_info *gi)
+/* layout reserved 2d areas in a larger area */
+/* NOTE: band, w, h, a(lign), o(ffs) is in slots */
+s32 reserve_2d(enum tiler_fmt fmt, u16 n, u16 w, u16 h, u16 band,
+ u16 align, u16 offs, struct gid_info *gi,
+ struct list_head *pos)
{
- if (gi && list_empty(&gi->areas) && list_empty(&gi->onedim) &&
- /* also ensure noone is still using this group */
- !gi->refs) {
- BUG_ON(!list_empty(&gi->reserved));
- list_del(&gi->by_pid);
+ u16 x, x0, e = ALIGN(w, align), w_res = (n - 1) * e + w;
+ struct mem_info *mi = NULL;
+ struct area_info *ai = NULL;
- /* if group is tracking kernel objects, we may free even
- the process info */
- if (gi->pi->kernel && list_empty(&gi->pi->groups)) {
- list_del(&gi->pi->list);
- kfree(gi->pi);
- }
+ printk(KERN_INFO "packing %u %u buffers into %u width\n",
+ n, w, w_res);
- kfree(gi);
+ /* calculate dimensions, band, offs and alignment in slots */
+ /* reserve an area */
+ ai = area_new_m(ALIGN(w_res + offs, max(band, align)), h,
+ max(band, align), TCM(fmt), gi);
+ if (!ai)
+ return -ENOMEM;
+
+ /* lay out blocks in the reserved area */
+ for (n = 0, x = offs; x < w_res; x += e, n++) {
+ /* reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ break;
+
+ memset(mi, 0, sizeof(*mi));
+ x0 = ai->area.p0.x + x;
+ _m_add2area(mi, ai, x0, w, &ai->blocks);
+ list_add(&mi->global, pos);
}
+
+ mutex_unlock(&mtx);
+ return n;
}
-static void clear_pat(struct tmm *tmm, struct tcm_area *area)
+/* reserve nv12 blocks if standard allocator is inefficient */
+/* TILER is designed so that a (w * h) * 8bit area is twice as wide as a
+ (w/2 * h/2) * 16bit area. Since having pairs of such 8-bit and 16-bit
+   blocks is a common use case for TILER, we optimize packing these into a
+ TILER area */
+s32 pack_nv12(int n, u16 w, u16 w1, u16 h, struct gid_info *gi,
+ u8 *p)
{
- struct pat_area p_area = {0};
- struct tcm_area slice, area_s;
+ u16 wh = (w1 + 1) >> 1, width, x0;
+ int m;
- tcm_for_each_slice(slice, *area, area_s) {
- p_area.x0 = slice.p0.x;
- p_area.y0 = slice.p0.y;
- p_area.x1 = slice.p1.x;
- p_area.y1 = slice.p1.y;
+ struct mem_info *mi = NULL;
+ struct area_info *ai = NULL;
+ struct list_head *pos;
- tmm_clear(tmm, p_area);
+ /* reserve area */
+ ai = area_new_m(w, h, 64, TILFMT_8BIT, gi);
+ if (!ai)
+ return -ENOMEM;
+
+ /* lay out blocks in the reserved area */
+ for (m = 0; m < 2 * n; m++) {
+ width = (m & 1) ? wh : w1;
+ x0 = ai->area.p0.x + *p++;
+
+ /* get insertion head */
+ list_for_each(pos, &ai->blocks) {
+ mi = list_entry(pos, struct mem_info, by_area);
+ if (mi->area.p0.x > x0)
+ break;
+ }
+
+ /* reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ break;
+
+ memset(mi, 0, sizeof(*mi));
+
+ _m_add2area(mi, ai, x0, width, pos);
+ list_add(&mi->global, &gi->reserved);
}
+
+ mutex_unlock(&mtx);
+ return n;
}
/* (must have mutex) free block and any freed areas */
@@ -562,7 +630,7 @@ static bool _m_chk_ref(struct mem_info *mi)
}
/* (must have mutex) */
-static inline s32 _m_dec_ref(struct mem_info *mi)
+static inline bool _m_dec_ref(struct mem_info *mi)
{
if (mi->refs-- <= 1)
return _m_chk_ref(mi);
@@ -586,281 +654,216 @@ static inline bool _m_try_free(struct mem_info *mi)
return _m_chk_ref(mi);
}
-/* check if an id is used */
-static bool _m_id_in_use(u32 id)
-{
- struct mem_info *mi;
- list_for_each_entry(mi, &blocks, global)
- if (mi->blk.id == id)
- return 1;
- return 0;
-}
-
-/* check if an offset is used */
-static bool _m_offs_in_use(u32 offs, u32 length, struct process_info *pi)
-{
- struct __buf_info *_b;
- list_for_each_entry(_b, &pi->bufs, by_pid)
- if (_b->buf_info.offset < offs + length &&
- _b->buf_info.offset + _b->buf_info.length > offs)
- return 1;
- return 0;
-}
-
-/* get an id */
-static u32 _m_get_id(void)
-{
- static u32 id = 0x2d7ae;
-
- /* ensure noone is using this id */
- while (_m_id_in_use(id)) {
- /* Galois LSFR: 32, 22, 2, 1 */
- id = (id >> 1) ^ (u32)((0 - (id & 1u)) & 0x80200003u);
- }
-
- return id;
-}
-
-/* get an offset */
-static u32 _m_get_offs(struct process_info *pi, u32 length)
-{
- static u32 offs = 0xda7a;
-
- /* ensure no-one is using this offset */
- while ((offs << PAGE_SHIFT) + length < length ||
- _m_offs_in_use(offs << PAGE_SHIFT, length, pi)) {
- /* Galois LSF: 20, 17 */
- offs = (offs >> 1) ^ (u32)((0 - (offs & 1u)) & 0x90000);
- }
-
- return offs << PAGE_SHIFT;
-}
+/* --- external methods --- */
-static s32 register_buf(struct __buf_info *_b, struct process_info *pi)
-{
+/* find a block by key/id and lock it */
+struct mem_info *
+find_n_lock(u32 key, u32 id, struct gid_info *gi) {
+ struct area_info *ai = NULL;
struct mem_info *mi = NULL;
- struct tiler_buf_info *b = &_b->buf_info;
- u32 i, num = b->num_blocks, remain = num;
-
- /* check validity */
- if (num > TILER_MAX_NUM_BLOCKS)
- return -EINVAL;
mutex_lock(&mtx);
- /* find each block */
- b->length = 0;
- list_for_each_entry(mi, &blocks, global) {
- for (i = 0; i < num; i++) {
- if (!_b->mi[i] && mi->blk.id == b->blocks[i].id &&
- mi->blk.key == b->blocks[i].key) {
- _b->mi[i] = mi;
- b->length += tiler_size(&mi->blk);
- /* quit if found all*/
- if (!--remain)
- break;
+ /* if group is not given, look globally */
+ if (!gi) {
+ list_for_each_entry(mi, &blocks, global) {
+ if (mi->blk.key == key && mi->blk.id == id)
+ goto done;
+ }
+ } else {
+		/* if id is ssptr, we know if block is 1D or 2D by the address,
+ so we optimize lookup */
+ if (!ssptr_id ||
+ TILER_GET_ACC_MODE(id) == TILFMT_PAGE) {
+ list_for_each_entry(mi, &gi->onedim, by_area) {
+ if (mi->blk.key == key && mi->blk.id == id)
+ goto done;
+ }
+ }
+ if (!ssptr_id ||
+ TILER_GET_ACC_MODE(id) != TILFMT_PAGE) {
+ list_for_each_entry(ai, &gi->areas, by_gid) {
+ list_for_each_entry(mi, &ai->blocks, by_area) {
+ if (mi->blk.key == key &&
+ mi->blk.id == id)
+ goto done;
+ }
}
}
}
- /* if found all, register buffer */
- if (!remain) {
- b->offset = _m_get_offs(pi, b->length);
-
- list_add(&_b->by_pid, &pi->bufs);
-
- /* using each block */
- for (i = 0; i < num; i++)
- _m_inc_ref(_b->mi[i]);
- }
+ mi = NULL;
+done:
+ /* lock block by increasing its ref count */
+ if (mi)
+ mi->refs++;
mutex_unlock(&mtx);
- return remain ? -EACCES : 0;
+ return mi;
}
-/* must have mutex */
-static void _m_unregister_buf(struct __buf_info *_b)
+/* unlock a block, and optionally free it */
+void unlock_n_free(struct mem_info *mi, bool free)
{
- u32 i;
-
- /* unregister */
- list_del(&_b->by_pid);
+ mutex_lock(&mtx);
- /* no longer using the blocks */
- for (i = 0; i < _b->buf_info.num_blocks; i++)
- _m_dec_ref(_b->mi[i]);
+ _m_dec_ref(mi);
+ if (free)
+ _m_try_free(mi);
- kfree(_b);
+ mutex_unlock(&mtx);
}
/**
- * Free all info kept by a process:
+ * Free all blocks in a group:
*
- * all registered buffers, allocated blocks, and unreferenced
- * blocks. Any blocks/areas still referenced will move to the
- * orphaned lists to avoid issues if a new process is created
+ * allocated blocks, and unreferenced blocks. Any blocks/areas still referenced
+ * will move to the orphaned lists to avoid issues if a new process is created
* with the same pid.
*
* (must have mutex)
*/
-static void _m_free_process_info(struct process_info *pi)
+void destroy_group(struct gid_info *gi)
{
struct area_info *ai, *ai_;
struct mem_info *mi, *mi_;
- struct gid_info *gi, *gi_;
- struct __buf_info *_b = NULL, *_b_ = NULL;
bool ai_autofreed, need2free;
- /* unregister all buffers */
- list_for_each_entry_safe(_b, _b_, &pi->bufs, by_pid)
- _m_unregister_buf(_b);
-
- BUG_ON(!list_empty(&pi->bufs));
+ mutex_lock(&mtx);
/* free all allocated blocks, and remove unreferenced ones */
- list_for_each_entry_safe(gi, gi_, &pi->groups, by_pid) {
-
- /*
- * Group info structs when they become empty on an _m_try_free.
- * However, if the group info is already empty, we need to
- * remove it manually
- */
- need2free = list_empty(&gi->areas) && list_empty(&gi->onedim);
- list_for_each_entry_safe(ai, ai_, &gi->areas, by_gid) {
- ai_autofreed = true;
- list_for_each_entry_safe(mi, mi_, &ai->blocks, by_area)
- ai_autofreed &= _m_try_free(mi);
-
- /* save orphaned areas for later removal */
- if (!ai_autofreed) {
- need2free = true;
- ai->gi = NULL;
- list_move(&ai->by_gid, &orphan_areas);
- }
- }
- list_for_each_entry_safe(mi, mi_, &gi->onedim, by_area) {
- if (!_m_try_free(mi)) {
- need2free = true;
- /* save orphaned 1D blocks */
- mi->parent = NULL;
- list_move(&mi->by_area, &orphan_onedim);
- }
+ /*
+	 * Group info structs are freed when they become empty on an _m_try_free.
+ * However, if the group info is already empty, we need to
+ * remove it manually
+ */
+ need2free = list_empty(&gi->areas) && list_empty(&gi->onedim);
+ list_for_each_entry_safe(ai, ai_, &gi->areas, by_gid) {
+ ai_autofreed = true;
+ list_for_each_entry_safe(mi, mi_, &ai->blocks, by_area)
+ ai_autofreed &= _m_try_free(mi);
+
+ /* save orphaned areas for later removal */
+ if (!ai_autofreed) {
+ need2free = true;
+ ai->gi = NULL;
+ list_move(&ai->by_gid, &orphan_areas);
}
+ }
- /* if group is still alive reserved list should have been
- emptied as there should be no reference on those blocks */
- if (need2free) {
- BUG_ON(!list_empty(&gi->onedim));
- BUG_ON(!list_empty(&gi->areas));
- _m_try_free_group(gi);
+ list_for_each_entry_safe(mi, mi_, &gi->onedim, by_area) {
+ if (!_m_try_free(mi)) {
+ need2free = true;
+ /* save orphaned 1D blocks */
+ mi->parent = NULL;
+ list_move(&mi->by_area, &orphan_onedim);
}
}
- BUG_ON(!list_empty(&pi->groups));
- list_del(&pi->list);
- kfree(pi);
+ /* if group is still alive reserved list should have been
+ emptied as there should be no reference on those blocks */
+ if (need2free) {
+ BUG_ON(!list_empty(&gi->onedim));
+ BUG_ON(!list_empty(&gi->areas));
+ _m_try_free_group(gi);
+ }
+
+ mutex_unlock(&mtx);
}
-static s32 get_area(u32 sys_addr, struct tcm_pt *pt)
+/* release (reserved) blocks */
+void release_blocks(struct list_head *reserved)
{
- enum tiler_fmt fmt;
+ struct mem_info *mi, *mi_;
- sys_addr &= TILER_ALIAS_VIEW_CLEAR;
- fmt = TILER_GET_ACC_MODE(sys_addr);
+ mutex_lock(&mtx);
- switch (fmt) {
- case TILFMT_8BIT:
- pt->x = DMM_HOR_X_PAGE_COOR_GET_8(sys_addr);
- pt->y = DMM_HOR_Y_PAGE_COOR_GET_8(sys_addr);
- break;
- case TILFMT_16BIT:
- pt->x = DMM_HOR_X_PAGE_COOR_GET_16(sys_addr);
- pt->y = DMM_HOR_Y_PAGE_COOR_GET_16(sys_addr);
- break;
- case TILFMT_32BIT:
- pt->x = DMM_HOR_X_PAGE_COOR_GET_32(sys_addr);
- pt->y = DMM_HOR_Y_PAGE_COOR_GET_32(sys_addr);
- break;
- case TILFMT_PAGE:
- pt->x = (sys_addr & 0x7FFFFFF) >> 12;
- pt->y = pt->x / TILER_WIDTH;
- pt->x &= (TILER_WIDTH - 1);
- break;
- default:
- return -EFAULT;
+ /* find block in global list and free it */
+ list_for_each_entry_safe(mi, mi_, reserved, global) {
+ BUG_ON(mi->refs || mi->alloced);
+ _m_free(mi);
}
- return 0x0;
+ mutex_unlock(&mtx);
}
-static u32 __get_alias_addr(enum tiler_fmt fmt, u16 x, u16 y)
+/* add reserved blocks to a group */
+void add_reserved_blocks(struct list_head *reserved, struct gid_info *gi)
{
- u32 acc_mode = -1;
- u32 x_shft = -1, y_shft = -1;
+ mutex_lock(&mtx);
+ list_splice_init(reserved, &gi->reserved);
+ mutex_unlock(&mtx);
+}
- switch (fmt) {
- case TILFMT_8BIT:
- acc_mode = 0; x_shft = 6; y_shft = 20;
- break;
- case TILFMT_16BIT:
- acc_mode = 1; x_shft = 7; y_shft = 20;
- break;
- case TILFMT_32BIT:
- acc_mode = 2; x_shft = 7; y_shft = 20;
- break;
- case TILFMT_PAGE:
- acc_mode = 3; y_shft = 8;
- break;
- default:
- return 0;
- break;
+/* find a block by ssptr */
+struct mem_info *find_block_by_ssptr(u32 sys_addr)
+{
+ struct mem_info *i;
+ struct tcm_pt pt;
+ u32 x, y;
+ enum tiler_fmt fmt;
+ struct tiler_block_t blk = {0};
+
+ if (!is_tiler_addr(sys_addr))
+ return NULL;
+
+ blk.phys = sys_addr;
+ fmt = tiler_fmt(&blk);
+
+ /* convert x & y pixel coordinates to slot coordinates */
+ tiler_get_natural_xy(sys_addr, &x, &y);
+ pt.x = x / (fmt == TILFMT_PAGE ? 1 : fmt == TILFMT_32BIT ? 32 : 64);
+ pt.y = y / (fmt == TILFMT_PAGE ? 1 : fmt == TILFMT_8BIT ? 64 : 32);
+
+ mutex_lock(&mtx);
+ list_for_each_entry(i, &blocks, global) {
+ if (tiler_fmt(&i->blk) == TILER_GET_ACC_MODE(sys_addr) &&
+ tcm_is_in(pt, i->area))
+ goto found;
}
+ i = NULL;
- if (fmt == TILFMT_PAGE)
- return (u32)TIL_ALIAS_ADDR((x | y << y_shft) << 12, acc_mode);
- else
- return (u32)TIL_ALIAS_ADDR(x << x_shft | y << y_shft, acc_mode);
+found:
+ mutex_unlock(&mtx);
+ return i;
}
-/* must have mutex */
-static struct gid_info *_m_get_gi(struct process_info *pi, u32 gid)
+/* fill tiler_block_info from a block's internal state */
+void fill_block_info(struct mem_info *i, struct tiler_block_info *blk)
{
- struct gid_info *gi;
+ blk->ptr = NULL;
+ blk->fmt = TILER_GET_ACC_MODE(i->blk.phys);
+ blk->ssptr = i->blk.phys;
- /* see if group already exist */
- list_for_each_entry(gi, &pi->groups, by_pid) {
- if (gi->gid == gid)
- goto done;
+ if (blk->fmt == TILFMT_PAGE) {
+ blk->dim.len = i->blk.width;
+ blk->stride = 0;
+ blk->group_id = ((struct gid_info *) i->parent)->gid;
+ } else {
+ blk->stride = tiler_vstride(&i->blk);
+ blk->dim.area.width = i->blk.width;
+ blk->dim.area.height = i->blk.height;
+ blk->group_id = ((struct area_info *) i->parent)->gi->gid;
}
-
- /* create new group */
- gi = kmalloc(sizeof(*gi), GFP_KERNEL);
- if (!gi)
- return gi;
-
- memset(gi, 0, sizeof(*gi));
- INIT_LIST_HEAD(&gi->areas);
- INIT_LIST_HEAD(&gi->onedim);
- INIT_LIST_HEAD(&gi->reserved);
- gi->pi = pi;
- gi->gid = gid;
- list_add(&gi->by_pid, &pi->groups);
-done:
- /*
- * Once area is allocated, the group info's ref count will be
- * decremented as the reference is no longer needed.
- */
- gi->refs++;
- return gi;
+ blk->id = i->blk.id;
+ blk->key = i->blk.key;
+ blk->offs = i->blk.phys & ~PAGE_MASK;
+ blk->align = 0;
}
+/*
+ * Block operations
+ * ==========================================================================
+ */
+
static struct mem_info *__get_area(enum tiler_fmt fmt, u32 width, u32 height,
u16 align, u16 offs, struct gid_info *gi)
{
u16 x, y, band, in_offs = 0;
struct mem_info *mi = NULL;
+ struct tiler_view_orient orient = {0};
/* calculate dimensions, band, offs and alignment in slots */
if (__analize_area(fmt, width, height, &x, &y, &band, &align, &offs,
@@ -896,167 +899,84 @@ static struct mem_info *__get_area(enum tiler_fmt fmt, u32 width, u32 height,
gi->refs--;
mutex_unlock(&mtx);
- mi->blk.phys = __get_alias_addr(fmt, mi->area.p0.x, mi->area.p0.y)
- + in_offs;
+ mi->blk.phys = tiler_get_address(orient, fmt,
+ mi->area.p0.x * (fmt == TILFMT_PAGE ? 1 :
+ fmt == TILFMT_32BIT ? 32 : 64),
+ mi->area.p0.y * (fmt == TILFMT_PAGE ? 1 :
+ fmt == TILFMT_8BIT ? 64 : 32))
+ + TILER_ALIAS_BASE + in_offs;
return mi;
}
-s32 tiler_mmap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
- struct vm_area_struct *vma, u32 voffs)
-{
- u32 v, p;
- u32 len; /* area to map */
-
- /* don't allow mremap */
- vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
-
- /* mapping must fit into vma */
- if (vma->vm_start > vma->vm_start + voffs ||
- vma->vm_start + voffs > vma->vm_start + voffs + size ||
- vma->vm_start + voffs + size > vma->vm_end)
- BUG();
-
- /* mapping must fit into block */
- if (offs > offs + size ||
- offs + size > tiler_size(blk))
- BUG();
-
- v = tiler_vstride(blk);
- p = tiler_pstride(blk);
-
- /* remap block portion */
- len = v - (offs % v); /* initial area to map */
- while (size) {
- if (len > size)
- len = size;
-
- vma->vm_pgoff = (blk->phys + offs) >> PAGE_SHIFT;
- if (remap_pfn_range(vma, vma->vm_start + voffs, vma->vm_pgoff,
- len, vma->vm_page_prot))
- return -EAGAIN;
- voffs += len;
- offs += len + p - v;
- size -= len;
- len = v; /* subsequent area to map */
- }
- return 0;
-}
-EXPORT_SYMBOL(tiler_mmap_blk);
-
-s32 tiler_ioremap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
- u32 addr, u32 mtype)
+s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info)
{
- u32 v, p;
- u32 len; /* area to map */
- const struct mem_type *type = get_mem_type(mtype);
-
- /* mapping must fit into address space */
- if (addr > addr + size)
- BUG();
-
- /* mapping must fit into block */
- if (offs > offs + size ||
- offs + size > tiler_size(blk))
- BUG();
-
- v = tiler_vstride(blk);
- p = tiler_pstride(blk);
-
- /* move offset and address to end */
- offs += blk->phys + size;
- addr += size;
-
- len = v - (offs % v); /* initial area to map */
- while (size) {
- while (len && size) {
- if (ioremap_page(addr - size, offs - size, type))
- return -EAGAIN;
- len -= PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- offs += p - v;
- len = v; /* subsequent area to map */
- }
- return 0;
-}
-EXPORT_SYMBOL(tiler_ioremap_blk);
+ struct mem_info *mi = NULL;
+ struct gid_info *gi = NULL;
-static s32 tiler_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct __buf_info *_b = NULL;
- struct tiler_buf_info *b = NULL;
- u32 i, map_offs, map_size, blk_offs, blk_size, mapped_size;
- struct process_info *pi = filp->private_data;
- u32 offs = vma->vm_pgoff << PAGE_SHIFT;
- u32 size = vma->vm_end - vma->vm_start;
+ *info = NULL;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ /* only support up to page alignment */
+ if (align > PAGE_SIZE || offs >= (align ? : default_align) || !pi)
+ return -EINVAL;
+ /* get group context */
mutex_lock(&mtx);
- list_for_each_entry(_b, &pi->bufs, by_pid) {
- if (offs >= _b->buf_info.offset &&
- offs + size <= _b->buf_info.offset + _b->buf_info.length) {
- b = &_b->buf_info;
- break;
- }
- }
+ gi = _m_get_gi(pi, gid);
mutex_unlock(&mtx);
- if (!b)
- return -ENXIO;
-
- /* mmap relevant blocks */
- blk_offs = _b->buf_info.offset;
- mapped_size = 0;
- for (i = 0; i < b->num_blocks; i++, blk_offs += blk_size) {
- blk_size = tiler_size(&_b->mi[i]->blk);
- if (offs >= blk_offs + blk_size || offs + size < blk_offs)
- continue;
- map_offs = max(offs, blk_offs) - blk_offs;
- map_size = min(size - mapped_size, blk_size);
- if (tiler_mmap_blk(&_b->mi[i]->blk, map_offs, map_size, vma,
- mapped_size))
- return -EAGAIN;
- mapped_size += map_size;
- }
- return 0;
-}
-static s32 refill_pat(struct tmm *tmm, struct tcm_area *area, u32 *ptr)
-{
- s32 res = 0;
- s32 size = tcm_sizeof(*area) * sizeof(*ptr);
- u32 *page;
- dma_addr_t page_pa;
- struct pat_area p_area = {0};
- struct tcm_area slice, area_s;
+ if (!gi)
+ return -ENOMEM;
- /* must be a 16-byte aligned physical address */
- page = dma_alloc_coherent(NULL, size, &page_pa, GFP_ATOMIC);
- if (!page)
+ /* reserve area in tiler container */
+ mi = __get_area(fmt, width, height, align, offs, gi);
+ if (!mi) {
+ mutex_lock(&mtx);
+ gi->refs--;
+ _m_try_free_group(gi);
+ mutex_unlock(&mtx);
return -ENOMEM;
+ }
- tcm_for_each_slice(slice, *area, area_s) {
- p_area.x0 = slice.p0.x;
- p_area.y0 = slice.p0.y;
- p_area.x1 = slice.p1.x;
- p_area.y1 = slice.p1.y;
+ mi->blk.width = width;
+ mi->blk.height = height;
+ mi->blk.key = key;
+ if (ssptr_id) {
+ mi->blk.id = mi->blk.phys;
+ } else {
+ mutex_lock(&mtx);
+ mi->blk.id = _m_get_id();
+ mutex_unlock(&mtx);
+ }
- memcpy(page, ptr, sizeof(*ptr) * tcm_sizeof(slice));
- ptr += tcm_sizeof(slice);
+ /* allocate and map if mapping is supported */
+ if (tmm_can_map(TMM(fmt))) {
+ mi->num_pg = tcm_sizeof(mi->area);
- if (tmm_map(tmm, p_area, page_pa)) {
- res = -EFAULT;
- break;
- }
+ mi->mem = tmm_get(TMM(fmt), mi->num_pg);
+ if (!mi->mem)
+ goto cleanup;
+
+ /* Ensure the data reaches to main memory before PAT refill */
+ wmb();
+
+ /* program PAT */
+ if (refill_pat(TMM(fmt), &mi->area, mi->mem))
+ goto cleanup;
}
+ *info = mi;
+ return 0;
- dma_free_coherent(NULL, size, page, page_pa);
+cleanup:
+ mutex_lock(&mtx);
+ _m_free(mi);
+ mutex_unlock(&mtx);
+ return -ENOMEM;
- return res;
}
-static s32 map_block(enum tiler_fmt fmt, u32 width, u32 height,
+s32 map_block(enum tiler_fmt fmt, u32 width, u32 height,
u32 key, u32 gid, struct process_info *pi,
struct mem_info **info, u32 usr_addr)
{
@@ -1191,850 +1111,10 @@ done:
return res;
}
-s32 tiler_mapx(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 gid,
- pid_t pid, u32 usr_addr)
-{
- struct mem_info *mi;
- struct process_info *pi;
- s32 res;
-
- if (!blk || blk->phys)
- BUG();
-
- pi = __get_pi(pid, true);
- if (!pi)
- return -ENOMEM;
-
- res = map_block(fmt, blk->width, blk->height, blk->key, gid, pi, &mi,
- usr_addr);
- if (mi)
- blk->phys = mi->blk.phys;
- return res;
-
-}
-EXPORT_SYMBOL(tiler_mapx);
-
-s32 tiler_map(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 usr_addr)
-{
- return tiler_mapx(blk, fmt, 0, current->tgid, usr_addr);
-}
-EXPORT_SYMBOL(tiler_map);
-
-s32 free_block(u32 key, u32 id, struct process_info *pi)
-{
- struct gid_info *gi = NULL;
- struct area_info *ai = NULL;
- struct mem_info *mi = NULL;
- s32 res = -ENOENT;
-
- mutex_lock(&mtx);
-
- /* find block in process list and free it */
- list_for_each_entry(gi, &pi->groups, by_pid) {
- /* is id is ssptr, we know if block is 1D or 2D by the address,
- so we optimize lookup */
- if (!ssptr_id ||
- TILER_GET_ACC_MODE(id) == TILFMT_PAGE) {
- list_for_each_entry(mi, &gi->onedim, by_area) {
- if (mi->blk.key == key && mi->blk.id == id) {
- _m_try_free(mi);
- res = 0;
- goto done;
- }
- }
- }
-
- if (!ssptr_id ||
- TILER_GET_ACC_MODE(id) != TILFMT_PAGE) {
- list_for_each_entry(ai, &gi->areas, by_gid) {
- list_for_each_entry(mi, &ai->blocks, by_area) {
- if (mi->blk.key == key &&
- mi->blk.id == id) {
- _m_try_free(mi);
- res = 0;
- goto done;
- }
- }
- }
- }
- }
-
-done:
- mutex_unlock(&mtx);
-
- /* for debugging, we can set the PAT entries to DMM_LISA_MAP__0 */
- return res;
-}
-
-static s32 free_block_global(u32 key, u32 id)
-{
- struct mem_info *mi;
- s32 res = -ENOENT;
-
- mutex_lock(&mtx);
-
- /* find block in global list and free it */
- list_for_each_entry(mi, &blocks, global) {
- if (mi->blk.key == key && mi->blk.id == id) {
- _m_try_free(mi);
- res = 0;
- break;
- }
- }
- mutex_unlock(&mtx);
-
- /* for debugging, we can set the PAT entries to DMM_LISA_MAP__0 */
- return res;
-}
-
-s32 tiler_free(struct tiler_block_t *blk)
-{
- return free_block_global(blk->key, blk->id);
-}
-EXPORT_SYMBOL(tiler_free);
-
-s32 find_block(u32 sys_addr, struct tiler_block_info *blk)
-{
- struct mem_info *i;
- struct tcm_pt pt;
-
- if (!is_tiler_addr(sys_addr))
- return -EFAULT;
-
- if (get_area(sys_addr, &pt))
- return -EFAULT;
-
- list_for_each_entry(i, &blocks, global) {
- if (tiler_fmt(&i->blk) == TILER_GET_ACC_MODE(sys_addr) &&
- tcm_is_in(pt, i->area))
- goto found;
- }
- return -EFAULT;
-
-found:
- blk->ptr = NULL;
- blk->fmt = TILER_GET_ACC_MODE(i->blk.phys);
- blk->ssptr = i->blk.phys;
-
- if (blk->fmt == TILFMT_PAGE) {
- blk->dim.len = i->blk.width;
- blk->stride = 0;
- blk->group_id = ((struct gid_info *) i->parent)->gid;
- } else {
- blk->stride = tiler_vstride(&i->blk);
- blk->dim.area.width = i->blk.width;
- blk->dim.area.height = i->blk.height;
- blk->group_id = ((struct area_info *) i->parent)->gi->gid;
- }
- blk->id = i->blk.id;
- blk->key = i->blk.key;
- blk->offs = i->blk.phys & ~PAGE_MASK;
- blk->align = 0;
- return 0;
-}
-
-static s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 key, u32 gid, struct process_info *pi,
- struct mem_info **info);
-
-/* we have two algorithms for packing nv12 blocks */
-
-/* we want to find the most effective packing for the smallest area */
-
-/* layout reserved 2d areas in a larger area */
-/* NOTE: band, w, h, a(lign), o(ffs) is in slots */
-static s32 reserve_2d(enum tiler_fmt fmt, u16 n, u16 w, u16 h, u16 band,
- u16 align, u16 offs, struct gid_info *gi,
- struct list_head *pos)
-{
- u16 x, x0, e = ALIGN(w, align), w_res = (n - 1) * e + w;
- struct mem_info *mi = NULL;
- struct area_info *ai = NULL;
-
- printk(KERN_INFO "packing %u %u buffers into %u width\n",
- n, w, w_res);
-
- /* calculate dimensions, band, offs and alignment in slots */
- /* reserve an area */
- ai = area_new_m(ALIGN(w_res + offs, max(band, align)), h,
- max(band, align), TCM(fmt), gi);
- if (!ai)
- return -ENOMEM;
-
- /* lay out blocks in the reserved area */
- for (n = 0, x = offs; x < w_res; x += e, n++) {
- /* reserve a block struct */
- mi = kmalloc(sizeof(*mi), GFP_KERNEL);
- if (!mi)
- break;
-
- memset(mi, 0, sizeof(*mi));
- x0 = ai->area.p0.x + x;
- _m_add2area(mi, ai, x0, w, &ai->blocks);
- list_add(&mi->global, pos);
- }
-
- mutex_unlock(&mtx);
- return n;
-}
-
-/* reserve nv12 blocks if standard allocator is inefficient */
-/* TILER is designed so that a (w * h) * 8bit area is twice as wide as a
- (w/2 * h/2) * 16bit area. Since having pairs of such 8-bit and 16-bit
- blocks is a common usecase for TILER, we optimize packing these into a
- TILER area */
-static s32 pack_nv12(int n, u16 w, u16 w1, u16 h, struct gid_info *gi,
- u8 *p)
-{
- u16 wh = (w1 + 1) >> 1, width, x0;
- int m;
-
- struct mem_info *mi = NULL;
- struct area_info *ai = NULL;
- struct list_head *pos;
-
- /* reserve area */
- ai = area_new_m(w, h, 64, TCM(TILFMT_8BIT), gi);
- if (!ai)
- return -ENOMEM;
-
- /* lay out blocks in the reserved area */
- for (m = 0; m < 2 * n; m++) {
- width = (m & 1) ? wh : w1;
- x0 = ai->area.p0.x + *p++;
-
- /* get insertion head */
- list_for_each(pos, &ai->blocks) {
- mi = list_entry(pos, struct mem_info, by_area);
- if (mi->area.p0.x > x0)
- break;
- }
-
- /* reserve a block struct */
- mi = kmalloc(sizeof(*mi), GFP_KERNEL);
- if (!mi)
- break;
-
- memset(mi, 0, sizeof(*mi));
-
- _m_add2area(mi, ai, x0, width, pos);
- list_add(&mi->global, &gi->reserved);
- }
-
- mutex_unlock(&mtx);
- return n;
-}
-
-static inline u32 nv12_eff(u16 n, u16 w, u16 area, u16 n_need)
-{
- /* rank by total area needed first */
- return 0x10000000 - DIV_ROUND_UP(n_need, n) * area * 32 +
- /* then by efficiency */
- 1024 * n * ((w * 3 + 1) >> 1) / area;
-}
-
-static void reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
- u32 gid, struct process_info *pi)
-{
- /* adjust alignment to at least 128 bytes (16-bit slot width) */
- u16 w, h, band, a = MAX(128, align), o = offs, eff_w;
- struct gid_info *gi;
- int res = 0, res2, i;
- u16 n_t, n_s, area_t, area_s;
- u8 packing[2 * 21];
- struct list_head reserved = LIST_HEAD_INIT(reserved);
- struct mem_info *mi, *mi_;
- bool can_together = TMM(TILFMT_8BIT) == TMM(TILFMT_16BIT);
-
- /* Check input parameters for correctness, and support */
- if (!width || !height || !n ||
- offs >= (align ? : default_align) || offs & 1 ||
- align >= PAGE_SIZE || TCM(TILFMT_8BIT) != TCM(TILFMT_16BIT) ||
- n > TILER_WIDTH * TILER_HEIGHT / 2)
- return;
-
- /* calculate dimensions, band, offs and alignment in slots */
- if (__analize_area(TILFMT_8BIT, width, height, &w, &h, &band, &a, &o,
- NULL))
- return;
-
- /* get group context */
- mutex_lock(&mtx);
- gi = _m_get_gi(pi, gid);
- mutex_unlock(&mtx);
- if (!gi)
- return;
-
- eff_w = ALIGN(w, a);
-
- for (i = 0; i < n && res >= 0; i += res) {
- /* check packing separately vs together */
- n_s = nv12_separate(o, w, a, n - i, &area_s);
- if (can_together)
- n_t = nv12_together(o, w, a, n - i, &area_t, packing);
- else
- n_t = 0;
-
- /* pack based on better efficiency */
- res = -1;
- if (!can_together ||
- nv12_eff(n_s, w, area_s, n - i) >
- nv12_eff(n_t, w, area_t, n - i)) {
- /* pack separately */
-
- res = reserve_2d(TILFMT_8BIT, n_s, w, h, band, a, o, gi,
- &reserved);
-
- /* only reserve 16-bit blocks if 8-bit was successful,
- as we will try to match 16-bit areas to an already
- reserved 8-bit area, and there is no guarantee that
- an unreserved 8-bit area will match the offset of
- a singly reserved 16-bit area. */
- res2 = (res < 0 ? res :
- reserve_2d(TILFMT_16BIT, n_s, (w + 1) / 2, h,
- band / 2, a / 2, o / 2, gi, &reserved));
- if (res2 < 0 || res != res2) {
- /* clean up */
- mutex_lock(&mtx);
- list_for_each_entry_safe(mi, mi_, &reserved,
- global)
- _m_free(mi);
- mutex_unlock(&mtx);
- res = -1;
- } else {
- /* add list to reserved */
- mutex_lock(&mtx);
- list_splice_init(&reserved, &gi->reserved);
- mutex_unlock(&mtx);
- }
- }
-
- /* if separate packing failed, still try to pack together */
- if (res < 0 && can_together && n_t) {
- /* pack together */
- res = pack_nv12(n_t, area_t, w, h, gi, packing);
- }
- }
-
- mutex_lock(&mtx);
- gi->refs--;
- _m_try_free_group(gi);
- mutex_unlock(&mtx);
-}
-
-/* reserve 2d blocks (if standard allocator is inefficient) */
-static void reserve_blocks(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 gid,
- struct process_info *pi)
-{
- u32 til_width, bpp, bpt, res = 0, i;
- u16 o = offs, a = align, band, w, h, e, n_try;
- struct gid_info *gi;
-
- /* Check input parameters for correctness, and support */
- if (!width || !height || !n ||
- align > PAGE_SIZE || offs >= (align ? : default_align) ||
- fmt < TILFMT_8BIT || fmt > TILFMT_32BIT)
- return;
-
- /* tiler page width in pixels, bytes per pixel, tiler page in bytes */
- til_width = fmt == TILFMT_32BIT ? 32 : 64;
- bpp = 1 << (fmt - TILFMT_8BIT);
- bpt = til_width * bpp;
-
- /* check offset. Also, if block is less than half the mapping window,
- the default allocation is sufficient. Also check for basic area
- info. */
- if (width * bpp * 2 <= PAGE_SIZE ||
- __analize_area(fmt, width, height, &w, &h, &band, &a, &o, NULL))
- return;
-
- /* get group id */
- mutex_lock(&mtx);
- gi = _m_get_gi(pi, gid);
- mutex_unlock(&mtx);
- if (!gi)
- return;
-
- /* effective width of a buffer */
- e = ALIGN(w, a);
-
- for (i = 0; i < n && res >= 0; i += res) {
- /* blocks to allocate in one area */
- n_try = MIN(n - i, TILER_WIDTH);
- tiler_best2pack(offs, w, e, band, &n_try, NULL);
-
- res = -1;
- while (n_try > 1) {
- res = reserve_2d(fmt, n_try, w, h, band, a, o, gi,
- &gi->reserved);
- if (res >= 0)
- break;
-
- /* reduce n if failed to allocate area */
- n_try--;
- }
- }
- /* keep reserved blocks even if failed to reserve all */
-
- mutex_lock(&mtx);
- gi->refs--;
- _m_try_free_group(gi);
- mutex_unlock(&mtx);
-}
-
-s32 tiler_reservex(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 gid, pid_t pid)
-{
- struct process_info *pi = __get_pi(pid, true);
-
- if (pi)
- reserve_blocks(n, fmt, width, height, align, offs, gid, pi);
- return 0;
-}
-EXPORT_SYMBOL(tiler_reservex);
-
-s32 tiler_reserve(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs)
-{
- return tiler_reservex(n, fmt, width, height, align, offs,
- 0, current->tgid);
-}
-EXPORT_SYMBOL(tiler_reserve);
-
-/* reserve area for n identical buffers */
-s32 tiler_reservex_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
- u32 gid, pid_t pid)
-{
- struct process_info *pi = __get_pi(pid, true);
-
- if (pi)
- reserve_nv12(n, width, height, align, offs, gid, pi);
- return 0;
-}
-EXPORT_SYMBOL(tiler_reservex_nv12);
-
-s32 tiler_reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs)
-{
- return tiler_reservex_nv12(n, width, height, align, offs,
- 0, current->tgid);
-}
-EXPORT_SYMBOL(tiler_reserve_nv12);
-
-void unreserve_blocks(struct process_info *pi, u32 gid)
-{
- struct gid_info *gi;
- struct mem_info *mi, *mi_;
-
- mutex_lock(&mtx);
- gi = _m_get_gi(pi, gid);
- if (!gi)
- goto done;
- /* we have the mutex, so no need to keep reference */
- gi->refs--;
-
- list_for_each_entry_safe(mi, mi_, &gi->reserved, global) {
- BUG_ON(mi->refs || mi->alloced);
- _m_free(mi);
- }
-done:
- mutex_unlock(&mtx);
-}
-
-static s32 tiler_ioctl(struct inode *ip, struct file *filp, u32 cmd,
- unsigned long arg)
-{
- pgd_t *pgd = NULL;
- pmd_t *pmd = NULL;
- pte_t *ptep = NULL, pte = 0x0;
- s32 r = -1;
- struct process_info *pi = filp->private_data;
-
- struct __buf_info *_b = NULL;
- struct tiler_buf_info buf_info = {0};
- struct tiler_block_info block_info = {0};
- struct mem_info *mi;
-
- switch (cmd) {
- case TILIOC_GBLK:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- switch (block_info.fmt) {
- case TILFMT_PAGE:
- r = alloc_block(block_info.fmt, block_info.dim.len, 1,
- block_info.align, block_info.offs,
- block_info.key, block_info.group_id,
- pi, &mi);
- if (r)
- return r;
- break;
- case TILFMT_8BIT:
- case TILFMT_16BIT:
- case TILFMT_32BIT:
- r = alloc_block(block_info.fmt,
- block_info.dim.area.width,
- block_info.dim.area.height,
- block_info.align, block_info.offs,
- block_info.key, block_info.group_id,
- pi, &mi);
- if (r)
- return r;
- break;
- default:
- return -EINVAL;
- }
-
- if (mi) {
- block_info.id = mi->blk.id;
- block_info.stride = tiler_vstride(&mi->blk);
-#ifdef CONFIG_TILER_EXPOSE_SSPTR
- block_info.ssptr = mi->blk.phys;
-#endif
- }
- if (copy_to_user((void __user *)arg, &block_info,
- sizeof(block_info)))
- return -EFAULT;
- break;
- case TILIOC_FBLK:
- case TILIOC_UMBLK:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- /* search current process first, then all processes */
- free_block(block_info.key, block_info.id, pi) ?
- free_block_global(block_info.key, block_info.id) : 0;
-
- /* free always succeeds */
- break;
-
- case TILIOC_GSSP:
- pgd = pgd_offset(current->mm, arg);
- if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
- pmd = pmd_offset(pgd, arg);
- if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
- ptep = pte_offset_map(pmd, arg);
- if (ptep) {
- pte = *ptep;
- if (pte_present(pte))
- return (pte & PAGE_MASK) |
- (~PAGE_MASK & arg);
- }
- }
- }
- /* va not in page table */
- return 0x0;
- break;
- case TILIOC_MBLK:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- if (!block_info.ptr)
- return -EFAULT;
-
- r = map_block(block_info.fmt, block_info.dim.len, 1,
- block_info.key, block_info.group_id, pi,
- &mi, (u32)block_info.ptr);
- if (r)
- return r;
-
- if (mi) {
- block_info.id = mi->blk.id;
- block_info.stride = tiler_vstride(&mi->blk);
-#ifdef CONFIG_TILER_EXPOSE_SSPTR
- block_info.ssptr = mi->blk.phys;
-#endif
- }
- if (copy_to_user((void __user *)arg, &block_info,
- sizeof(block_info)))
- return -EFAULT;
- break;
-#ifndef CONFIG_TILER_SECURE
- case TILIOC_QBUF:
- if (!offset_lookup)
- return -EPERM;
-
- if (copy_from_user(&buf_info, (void __user *)arg,
- sizeof(buf_info)))
- return -EFAULT;
-
- mutex_lock(&mtx);
- list_for_each_entry(_b, &pi->bufs, by_pid) {
- if (buf_info.offset == _b->buf_info.offset) {
- if (copy_to_user((void __user *)arg,
- &_b->buf_info,
- sizeof(_b->buf_info))) {
- mutex_unlock(&mtx);
- return -EFAULT;
- } else {
- mutex_unlock(&mtx);
- return 0;
- }
- }
- }
- mutex_unlock(&mtx);
- return -EFAULT;
- break;
-#endif
- case TILIOC_RBUF:
- _b = kmalloc(sizeof(*_b), GFP_KERNEL);
- if (!_b)
- return -ENOMEM;
-
- memset(_b, 0x0, sizeof(*_b));
-
- if (copy_from_user(&_b->buf_info, (void __user *)arg,
- sizeof(_b->buf_info))) {
- kfree(_b); return -EFAULT;
- }
-
- r = register_buf(_b, pi);
- if (r) {
- kfree(_b); return -EACCES;
- }
-
- if (copy_to_user((void __user *)arg, &_b->buf_info,
- sizeof(_b->buf_info))) {
- _m_unregister_buf(_b);
- return -EFAULT;
- }
- break;
- case TILIOC_URBUF:
- if (copy_from_user(&buf_info, (void __user *)arg,
- sizeof(buf_info)))
- return -EFAULT;
-
- mutex_lock(&mtx);
- /* buffer registration is per process */
- list_for_each_entry(_b, &pi->bufs, by_pid) {
- if (buf_info.offset == _b->buf_info.offset) {
- _m_unregister_buf(_b);
- mutex_unlock(&mtx);
- return 0;
- }
- }
- mutex_unlock(&mtx);
- return -EFAULT;
- break;
- case TILIOC_PRBLK:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- if (block_info.fmt == TILFMT_8AND16) {
- reserve_nv12(block_info.key,
- block_info.dim.area.width,
- block_info.dim.area.height,
- block_info.align,
- block_info.offs,
- block_info.group_id, pi);
- } else {
- reserve_blocks(block_info.key,
- block_info.fmt,
- block_info.dim.area.width,
- block_info.dim.area.height,
- block_info.align,
- block_info.offs,
- block_info.group_id, pi);
- }
- break;
- case TILIOC_URBLK:
- unreserve_blocks(pi, arg);
- break;
-#ifndef CONFIG_TILER_SECURE
- case TILIOC_QBLK:
- if (!ssptr_lookup)
- return -EPERM;
-
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- if (find_block(block_info.ssptr, &block_info))
- return -EFAULT;
-
- if (copy_to_user((void __user *)arg, &block_info,
- sizeof(block_info)))
- return -EFAULT;
- break;
-#endif
- default:
- return -EINVAL;
- }
- return 0x0;
-}
-
-s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 key, u32 gid, struct process_info *pi,
- struct mem_info **info)
-{
- struct mem_info *mi = NULL;
- struct gid_info *gi = NULL;
-
- *info = NULL;
-
- /* only support up to page alignment */
- if (align > PAGE_SIZE || offs >= (align ? : default_align) || !pi)
- return -EINVAL;
-
- /* get group context */
- mutex_lock(&mtx);
- gi = _m_get_gi(pi, gid);
- mutex_unlock(&mtx);
-
- if (!gi)
- return -ENOMEM;
-
- /* reserve area in tiler container */
- mi = __get_area(fmt, width, height, align, offs, gi);
- if (!mi) {
- mutex_lock(&mtx);
- gi->refs--;
- _m_try_free_group(gi);
- mutex_unlock(&mtx);
- return -ENOMEM;
- }
-
- mi->blk.width = width;
- mi->blk.height = height;
- mi->blk.key = key;
- if (ssptr_id) {
- mi->blk.id = mi->blk.phys;
- } else {
- mutex_lock(&mtx);
- mi->blk.id = _m_get_id();
- mutex_unlock(&mtx);
- }
-
- /* allocate and map if mapping is supported */
- if (tmm_can_map(TMM(fmt))) {
- mi->num_pg = tcm_sizeof(mi->area);
-
- mi->mem = tmm_get(TMM(fmt), mi->num_pg);
- if (!mi->mem)
- goto cleanup;
-
- /* Ensure the data reaches to main memory before PAT refill */
- wmb();
-
- /* program PAT */
- if (refill_pat(TMM(fmt), &mi->area, mi->mem))
- goto cleanup;
- }
- *info = mi;
- return 0;
-
-cleanup:
- mutex_lock(&mtx);
- _m_free(mi);
- mutex_unlock(&mtx);
- return -ENOMEM;
-
-}
-
-s32 tiler_allocx(struct tiler_block_t *blk, enum tiler_fmt fmt,
- u32 align, u32 offs, u32 gid, pid_t pid)
-{
- struct mem_info *mi;
- struct process_info *pi;
- s32 res;
-
- if (!blk || blk->phys)
- BUG();
-
- pi = __get_pi(pid, true);
- if (!pi)
- return -ENOMEM;
-
- res = alloc_block(fmt, blk->width, blk->height, align, offs, blk->key,
- gid, pi, &mi);
- if (mi)
- blk->phys = mi->blk.phys;
- return res;
-}
-EXPORT_SYMBOL(tiler_allocx);
-
-s32 tiler_alloc(struct tiler_block_t *blk, enum tiler_fmt fmt,
- u32 align, u32 offs)
-{
- return tiler_allocx(blk, fmt, align, offs, 0, current->tgid);
-}
-EXPORT_SYMBOL(tiler_alloc);
-
-static void __exit tiler_exit(void)
-{
- struct process_info *pi = NULL, *pi_ = NULL;
- int i, j;
-
- mutex_lock(&mtx);
-
- /* free all process data */
- list_for_each_entry_safe(pi, pi_, &procs, list)
- _m_free_process_info(pi);
-
- /* all lists should have cleared */
- BUG_ON(!list_empty(&blocks));
- BUG_ON(!list_empty(&procs));
- BUG_ON(!list_empty(&orphan_onedim));
- BUG_ON(!list_empty(&orphan_areas));
-
- mutex_unlock(&mtx);
-
- /* close containers only once */
- for (i = TILFMT_8BIT; i <= TILFMT_MAX; i++) {
- /* remove identical containers (tmm is unique per tcm) */
- for (j = i + 1; j <= TILFMT_MAX; j++)
- if (TCM(i) == TCM(j)) {
- TCM_SET(j, NULL);
- TMM_SET(j, NULL);
- }
-
- tcm_deinit(TCM(i));
- tmm_deinit(TMM(i));
- }
-
- mutex_destroy(&mtx);
- platform_driver_unregister(&tiler_driver_ldm);
- cdev_del(&tiler_device->cdev);
- kfree(tiler_device);
- device_destroy(tilerdev_class, MKDEV(tiler_major, tiler_minor));
- class_destroy(tilerdev_class);
-}
-
-static s32 tiler_open(struct inode *ip, struct file *filp)
-{
- struct process_info *pi = __get_pi(current->tgid, false);
-
- if (!pi)
- return -ENOMEM;
-
- filp->private_data = pi;
- return 0x0;
-}
-
-static s32 tiler_release(struct inode *ip, struct file *filp)
-{
- struct process_info *pi = filp->private_data;
-
- mutex_lock(&mtx);
- /* free resources if last device in this process */
- if (0 == --pi->refs)
- _m_free_process_info(pi);
-
- mutex_unlock(&mtx);
-
- return 0x0;
-}
-
-static const struct file_operations tiler_fops = {
- .open = tiler_open,
- .ioctl = tiler_ioctl,
- .release = tiler_release,
- .mmap = tiler_mmap,
-};
+/*
+ * Driver code
+ * ==========================================================================
+ */
static s32 __init tiler_init(void)
{
@@ -2051,10 +1131,7 @@ static s32 __init tiler_init(void)
granularity < 1 || granularity > PAGE_SIZE ||
granularity & (granularity - 1))
return -EINVAL;
-#ifdef CONFIG_TILER_SECURE
- security = true;
- offset_lookup = ssptr_lookup = false;
-#endif
+
/* Allocate tiler container manager (we share 1 on OMAP4) */
div_pt.x = TILER_WIDTH; /* hardcoded default */
div_pt.y = (3 * TILER_HEIGHT) / 4;
@@ -2110,7 +1187,7 @@ static s32 __init tiler_init(void)
mutex_init(&mtx);
INIT_LIST_HEAD(&blocks);
- INIT_LIST_HEAD(&procs);
+ tiler_proc_init();
INIT_LIST_HEAD(&orphan_areas);
INIT_LIST_HEAD(&orphan_onedim);
@@ -2125,6 +1202,43 @@ error:
return r;
}
+static void __exit tiler_exit(void)
+{
+ int i, j;
+
+ mutex_lock(&mtx);
+
+ /* free all process data */
+ destroy_processes();
+
+ /* all lists should have cleared */
+ BUG_ON(!list_empty(&blocks));
+ BUG_ON(!list_empty(&orphan_onedim));
+ BUG_ON(!list_empty(&orphan_areas));
+
+ mutex_unlock(&mtx);
+
+ /* close containers only once */
+ for (i = TILFMT_8BIT; i <= TILFMT_MAX; i++) {
+ /* remove identical containers (tmm is unique per tcm) */
+ for (j = i + 1; j <= TILFMT_MAX; j++)
+ if (TCM(i) == TCM(j)) {
+ TCM_SET(j, NULL);
+ TMM_SET(j, NULL);
+ }
+
+ tcm_deinit(TCM(i));
+ tmm_deinit(TMM(i));
+ }
+
+ mutex_destroy(&mtx);
+ platform_driver_unregister(&tiler_driver_ldm);
+ cdev_del(&tiler_device->cdev);
+ kfree(tiler_device);
+ device_destroy(tilerdev_class, MKDEV(tiler_major, tiler_minor));
+ class_destroy(tilerdev_class);
+}
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("David Sin <davidsin@ti.com>");
MODULE_AUTHOR("Lajos Molnar <molnar@ti.com>");
diff --git a/drivers/media/video/tiler/tiler-reserve.c b/drivers/media/video/tiler/tiler-reserve.c
index ddc6b7d38eb..268a15806f9 100644
--- a/drivers/media/video/tiler/tiler-reserve.c
+++ b/drivers/media/video/tiler/tiler-reserve.c
@@ -16,17 +16,31 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <mach/tiler.h>
#include "tiler-def.h"
+#include "_tiler.h"
/* we want to find the most effective packing for the smallest area */
+/* we have two algorithms for packing nv12 blocks */
+
+/* we want to find the most effective packing for the smallest area */
+
+static inline u32 nv12_eff(u16 n, u16 w, u16 area, u16 n_need)
+{
+ /* rank by total area needed first */
+ return 0x10000000 - DIV_ROUND_UP(n_need, n) * area * 32 +
+ /* then by efficiency */
+ 1024 * n * ((w * 3 + 1) >> 1) / area;
+}
+
/* This method is used for both 2D and NV12 packing */
/* return maximum buffers that can be packed next to each other */
/* o(ffset), w(idth), e(ff_width), b(and), n(um blocks), area( needed) */
/* assumptions: w > 0, o < a <= e */
-u32 tiler_best2pack(u16 o, u16 w, u16 e, u16 b, u16 *n, u16 *area)
+static u32 tiler_best2pack(u16 o, u16 w, u16 e, u16 b, u16 *n, u16 *area)
{
u16 m = 0, max_n = *n; /* m is mostly n - 1 */
u32 eff, best_eff = 0; /* best values */
@@ -61,7 +75,7 @@ u32 tiler_best2pack(u16 o, u16 w, u16 e, u16 b, u16 *n, u16 *area)
/* nv12 packing algorithm 1: pack 8 and 16 bit block into separate areas */
/* assumptions: w > 0, o < a, 2 <= a */
-u16 nv12_separate(u16 o, u16 w, u16 a, u16 n, u16 *area)
+static u16 nv12_separate(u16 o, u16 w, u16 a, u16 n, u16 *area)
{
tiler_best2pack(o, w, ALIGN(w, a), 64, &n, area);
tiler_best2pack(o / 2, (w + 1) / 2, ALIGN(w, a) / 2, 32, &n, area);
@@ -196,7 +210,7 @@ static int nv12_D(u16 o, u16 w, u16 a, u16 *area, u16 n, u8 *p)
/* nv12 packing algorithm 2: pack 8 and 16 bit block into same areas */
/* assumptions: w > 0, o < a, 2 <= a, packing has at least MAX_ANY * 2 bytes */
-u16 nv12_together(u16 o, u16 w, u16 a, u16 n, u16 *area, u8 *packing)
+static u16 nv12_together(u16 o, u16 w, u16 a, u16 n, u16 *area, u8 *packing)
{
u16 n_best, n2, a_, o_, w_;
@@ -284,3 +298,150 @@ u16 nv12_together(u16 o, u16 w, u16 a, u16 n, u16 *area, u8 *packing)
return n_best;
}
+
+/* can_together: 8-bit and 16-bit views are in the same container */
+void reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
+ u32 gid, struct process_info *pi, bool can_together)
+{
+ /* adjust alignment to at least 128 bytes (16-bit slot width) */
+ u16 w, h, band, a = MAX(128, align), o = offs, eff_w;
+ struct gid_info *gi;
+ int res = 0, res2, i;
+ u16 n_t, n_s, area_t, area_s;
+ u8 packing[2 * 21];
+ struct list_head reserved = LIST_HEAD_INIT(reserved);
+
+ /* Check input parameters for correctness, and support */
+ if (!width || !height || !n ||
+ offs >= align || offs & 1 ||
+ align >= PAGE_SIZE ||
+ n > TILER_WIDTH * TILER_HEIGHT / 2)
+ return;
+
+ /* calculate dimensions, band, offs and alignment in slots */
+ if (__analize_area(TILFMT_8BIT, width, height, &w, &h, &band, &a, &o,
+ NULL))
+ return;
+
+ /* get group context */
+ gi = get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ eff_w = ALIGN(w, a);
+
+ for (i = 0; i < n && res >= 0; i += res) {
+ /* check packing separately vs together */
+ n_s = nv12_separate(o, w, a, n - i, &area_s);
+ if (can_together)
+ n_t = nv12_together(o, w, a, n - i, &area_t, packing);
+ else
+ n_t = 0;
+
+ /* pack based on better efficiency */
+ res = -1;
+ if (!can_together ||
+ nv12_eff(n_s, w, area_s, n - i) >
+ nv12_eff(n_t, w, area_t, n - i)) {
+
+ /* reserve blocks separately into a temporary list,
+ so that we can free them if unsuccessful */
+ res = reserve_2d(TILFMT_8BIT, n_s, w, h, band, a, o, gi,
+ &reserved);
+
+ /* only reserve 16-bit blocks if 8-bit was successful,
+ as we will try to match 16-bit areas to an already
+ reserved 8-bit area, and there is no guarantee that
+ an unreserved 8-bit area will match the offset of
+ a singly reserved 16-bit area. */
+ res2 = (res < 0 ? res :
+ reserve_2d(TILFMT_16BIT, n_s, (w + 1) / 2, h,
+ band / 2, a / 2, o / 2, gi, &reserved));
+ if (res2 < 0 || res != res2) {
+ /* clean up */
+ release_blocks(&reserved);
+ res = -1;
+ } else {
+ /* add list to reserved */
+ add_reserved_blocks(&reserved, gi);
+ }
+ }
+
+ /* if separate packing failed, still try to pack together */
+ if (res < 0 && can_together && n_t) {
+ /* pack together */
+ res = pack_nv12(n_t, area_t, w, h, gi, packing);
+ }
+ }
+
+ release_gi(gi);
+}
+
+/* reserve 2d blocks (if standard allocator is inefficient) */
+void reserve_blocks(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 gid,
+ struct process_info *pi)
+{
+ u32 til_width, bpp, bpt, res = 0, i;
+ u16 o = offs, a = align, band, w, h, e, n_try;
+ struct gid_info *gi;
+
+ /* Check input parameters for correctness, and support */
+ if (!width || !height || !n ||
+ align > PAGE_SIZE || offs >= align ||
+ fmt < TILFMT_8BIT || fmt > TILFMT_32BIT)
+ return;
+
+ /* tiler page width in pixels, bytes per pixel, tiler page in bytes */
+ til_width = fmt == TILFMT_32BIT ? 32 : 64;
+ bpp = 1 << (fmt - TILFMT_8BIT);
+ bpt = til_width * bpp;
+
+ /* check offset. Also, if block is less than half the mapping window,
+ the default allocation is sufficient. Also check for basic area
+ info. */
+ if (width * bpp * 2 <= PAGE_SIZE ||
+ __analize_area(fmt, width, height, &w, &h, &band, &a, &o, NULL))
+ return;
+
+ /* get group id */
+ gi = get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ /* effective width of a buffer */
+ e = ALIGN(w, a);
+
+ for (i = 0; i < n && res >= 0; i += res) {
+ /* blocks to allocate in one area */
+ n_try = MIN(n - i, TILER_WIDTH);
+ tiler_best2pack(offs, w, e, band, &n_try, NULL);
+
+ res = -1;
+ while (n_try > 1) {
+ res = reserve_2d(fmt, n_try, w, h, band, a, o, gi,
+ &gi->reserved);
+ if (res >= 0)
+ break;
+
+ /* reduce n if failed to allocate area */
+ n_try--;
+ }
+ }
+ /* keep reserved blocks even if failed to reserve all */
+
+ release_gi(gi);
+}
+
+void unreserve_blocks(struct process_info *pi, u32 gid)
+{
+ struct gid_info *gi;
+
+ gi = get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ release_blocks(&gi->reserved);
+
+ release_gi(gi);
+}
diff --git a/drivers/media/video/tiler/tiler-reserve.h b/drivers/media/video/tiler/tiler-reserve.h
deleted file mode 100644
index e454b1d240f..00000000000
--- a/drivers/media/video/tiler/tiler-reserve.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __TILER_PACK_H__
-#define __TILER_PACK_H__
-
-u32 tiler_best2pack(u16 o, u16 w, u16 e, u16 b, u16 *n, u16 *area);
-
-u16 nv12_together(u16 o, u16 w, u16 a, u16 n, u16 *area, u8 *packing);
-
-u16 nv12_separate(u16 o, u16 w, u16 a, int n, u16 *area);
-
-
-#endif
diff --git a/drivers/media/video/tiler/tmm.h b/drivers/media/video/tiler/tmm.h
index deaeca51a90..80d7001c14b 100644
--- a/drivers/media/video/tiler/tmm.h
+++ b/drivers/media/video/tiler/tmm.h
@@ -24,11 +24,11 @@ struct tmm {
void *pvt;
/* function table */
- u32 *(*get) (struct tmm *tmm, s32 num_pages);
- void (*free) (struct tmm *tmm, u32 *pages);
- s32 (*map) (struct tmm *tmm, struct pat_area area, u32 page_pa);
- void (*clear) (struct tmm *tmm, struct pat_area area);
- void (*deinit) (struct tmm *tmm);
+ u32 *(*get) (struct tmm *tmm, s32 num_pages);
+ void (*free) (struct tmm *tmm, u32 *pages);
+ s32 (*map) (struct tmm *tmm, struct pat_area area, u32 page_pa);
+ void (*clear) (struct tmm *tmm, struct pat_area area);
+ void (*deinit) (struct tmm *tmm);
};
/**