Diffstat (limited to 'drivers/video/b2r2')
-rw-r--r--  drivers/video/b2r2/Kconfig                          134
-rw-r--r--  drivers/video/b2r2/Makefile                          15
-rw-r--r--  drivers/video/b2r2/b2r2_blt_main.c                 3363
-rw-r--r--  drivers/video/b2r2/b2r2_core.c                     2819
-rw-r--r--  drivers/video/b2r2/b2r2_core.h                      108
-rw-r--r--  drivers/video/b2r2/b2r2_debug.c                     338
-rw-r--r--  drivers/video/b2r2/b2r2_debug.h                     102
-rw-r--r--  drivers/video/b2r2/b2r2_filters.c                   376
-rw-r--r--  drivers/video/b2r2/b2r2_filters.h                    73
-rw-r--r--  drivers/video/b2r2/b2r2_generic.c                  3334
-rw-r--r--  drivers/video/b2r2/b2r2_generic.h                    51
-rw-r--r--  drivers/video/b2r2/b2r2_global.h                    119
-rw-r--r--  drivers/video/b2r2/b2r2_hw.h                        707
-rw-r--r--  drivers/video/b2r2/b2r2_input_validation.c          496
-rw-r--r--  drivers/video/b2r2/b2r2_input_validation.h           31
-rw-r--r--  drivers/video/b2r2/b2r2_internal.h                  590
-rw-r--r--  drivers/video/b2r2/b2r2_kernel_if.c                  37
-rw-r--r--  drivers/video/b2r2/b2r2_mem_alloc.c                 668
-rw-r--r--  drivers/video/b2r2/b2r2_mem_alloc.h                 161
-rw-r--r--  drivers/video/b2r2/b2r2_node_gen.c                   83
-rw-r--r--  drivers/video/b2r2/b2r2_node_split.c               3734
-rw-r--r--  drivers/video/b2r2/b2r2_node_split.h                124
-rw-r--r--  drivers/video/b2r2/b2r2_profiler/Makefile             3
-rw-r--r--  drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c    270
-rw-r--r--  drivers/video/b2r2/b2r2_profiler_api.h               66
-rw-r--r--  drivers/video/b2r2/b2r2_profiler_socket.c           106
-rw-r--r--  drivers/video/b2r2/b2r2_profiler_socket.h            22
-rw-r--r--  drivers/video/b2r2/b2r2_structures.h                226
-rw-r--r--  drivers/video/b2r2/b2r2_timing.c                     22
-rw-r--r--  drivers/video/b2r2/b2r2_timing.h                     22
-rw-r--r--  drivers/video/b2r2/b2r2_utils.c                     633
-rw-r--r--  drivers/video/b2r2/b2r2_utils.h                      66
32 files changed, 18899 insertions, 0 deletions
diff --git a/drivers/video/b2r2/Kconfig b/drivers/video/b2r2/Kconfig
new file mode 100644
index 00000000000..8cc81876de7
--- /dev/null
+++ b/drivers/video/b2r2/Kconfig
@@ -0,0 +1,134 @@
+config FB_B2R2
+ tristate "B2R2 engine support"
+ default n
+ help
+	  The B2R2 engine performs various bit-blitting, post-processing
+	  and composition operations.
+
+config B2R2_PLUG_CONF
+ bool "B2R2 bus plug configuration"
+ depends on FB_B2R2
+ default n
+ help
+	  Configures how B2R2 accesses the memory bus. Enabling this will increase
+ the performance of B2R2 at the cost of using the bus more heavily.
+
+ If this is set to 'n', the hardware defaults will be used.
+
+choice
+ prompt "Opcode size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_OPSIZE_64
+
+ config B2R2_OPSIZE_8
+ bool "8 bytes"
+ config B2R2_OPSIZE_16
+ bool "16 bytes"
+ config B2R2_OPSIZE_32
+ bool "32 bytes"
+ config B2R2_OPSIZE_64
+ bool "64 bytes"
+
+endchoice
+
+choice
+ prompt "Chunk size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_CHSIZE_128
+
+ config B2R2_CHSIZE_1
+ bool "1 op"
+ config B2R2_CHSIZE_2
+ bool "2 ops"
+ config B2R2_CHSIZE_4
+ bool "4 ops"
+ config B2R2_CHSIZE_8
+ bool "8 ops"
+ config B2R2_CHSIZE_16
+ bool "16 ops"
+ config B2R2_CHSIZE_32
+ bool "32 ops"
+ config B2R2_CHSIZE_64
+ bool "64 ops"
+ config B2R2_CHSIZE_128
+ bool "128 ops"
+endchoice
+
+choice
+ prompt "Message size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_MGSIZE_128
+
+ config B2R2_MGSIZE_1
+ bool "1 chunk"
+ config B2R2_MGSIZE_2
+ bool "2 chunks"
+ config B2R2_MGSIZE_4
+ bool "4 chunks"
+ config B2R2_MGSIZE_8
+		bool "8 chunks"
+ config B2R2_MGSIZE_16
+ bool "16 chunks"
+ config B2R2_MGSIZE_32
+ bool "32 chunks"
+ config B2R2_MGSIZE_64
+ bool "64 chunks"
+ config B2R2_MGSIZE_128
+ bool "128 chunks"
+endchoice
+
+choice
+ prompt "Page size"
+ depends on B2R2_PLUG_CONF
+ default B2R2_PGSIZE_256
+
+ config B2R2_PGSIZE_64
+ bool "64 bytes"
+ config B2R2_PGSIZE_128
+ bool "128 bytes"
+ config B2R2_PGSIZE_256
+ bool "256 bytes"
+endchoice
+
+config B2R2_DEBUG
+ bool "B2R2 debugging"
+ default n
+ depends on FB_B2R2
+ help
+ Enable debugging features for the B2R2 driver.
+
+config B2R2_PROFILER
+ tristate "B2R2 profiler"
+ default n
+ depends on FB_B2R2
+ help
+ Enables the profiler for the B2R2 driver.
+
+ It is recommended to build this as a module, since the configuration
+ of filters etc. is done at load time.
+
+config B2R2_GENERIC
+ bool "B2R2 generic path"
+ default y
+ depends on FB_B2R2
+ help
+ Enables support for the generic path in the B2R2 driver. This path should
+ be used when there is no optimized implementation for a request.
+
+choice
+ prompt "Generic usage mode"
+ depends on B2R2_GENERIC
+ default B2R2_GENERIC_FALLBACK
+
+ config B2R2_GENERIC_FALLBACK
+ bool "Fallback"
+ help
+ The optimized path will be used for all supported operations, and the
+ generic path will be used as a fallback for the ones not implemented.
+
+ config B2R2_GENERIC_ONLY
+ bool "Always"
+ help
+ The generic path will be used for all operations.
+
+endchoice
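
Each choice group above selects exactly one symbol, so the driver can
collapse the selection into a single register field value at build time.
A minimal sketch of that pattern, assuming hypothetical
B2R2_PLUG_OPCODE_SIZE_* field values (illustrative names, not taken from
this patch):

    /* Illustrative: map the Kconfig opcode-size choice to a field value */
    #if defined(CONFIG_B2R2_OPSIZE_8)
    #define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_8
    #elif defined(CONFIG_B2R2_OPSIZE_16)
    #define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_16
    #elif defined(CONFIG_B2R2_OPSIZE_32)
    #define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_32
    #else /* CONFIG_B2R2_OPSIZE_64, the default */
    #define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_64
    #endif

The chunk, message and page sizes would be folded into their register
fields the same way.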
diff --git a/drivers/video/b2r2/Makefile b/drivers/video/b2r2/Makefile
new file mode 100644
index 00000000000..0150ad6f761
--- /dev/null
+++ b/drivers/video/b2r2/Makefile
@@ -0,0 +1,15 @@
+# Makefile for the B2R2 loadable module
+
+obj-$(CONFIG_FB_B2R2) += b2r2.o
+
+b2r2-objs = b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o \
+	b2r2_generic.o b2r2_node_gen.o b2r2_node_split.o \
+	b2r2_profiler_socket.o b2r2_timing.o b2r2_filters.o \
+	b2r2_utils.o b2r2_input_validation.o
+
+ifdef CONFIG_B2R2_DEBUG
+b2r2-objs += b2r2_debug.o
+endif
+
+ifeq ($(CONFIG_FB_B2R2),m)
+obj-y += b2r2_kernel_if.o
+endif
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler/
diff --git a/drivers/video/b2r2/b2r2_blt_main.c b/drivers/video/b2r2/b2r2_blt_main.c
new file mode 100644
index 00000000000..f79bfaee9ab
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_blt_main.c
@@ -0,0 +1,3363 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 Blitter module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <asm/cacheflush.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/hwmem.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_node_split.h"
+#include "b2r2_generic.h"
+#include "b2r2_mem_alloc.h"
+#include "b2r2_profiler_socket.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+#include "b2r2_input_validation.h"
+#include "b2r2_core.h"
+#include "b2r2_filters.h"
+
+#define B2R2_HEAP_SIZE (4 * PAGE_SIZE)
+#define MAX_TMP_BUF_SIZE (128 * PAGE_SIZE)
+
+/*
+ * TODO:
+ * Implementation of query cap
+ * Support for user space virtual pointer to physically consecutive memory
+ * Support for user space virtual pointer to physically scattered memory
+ * Callback reads lagging behind in blt_api_stress app
+ * Store smaller items in the report list instead of the whole request
+ * Support read of many report records at once.
+ */
+
+/**
+ * b2r2_ctl - Our device control structures, one per /dev/b2r2_blt device
+ */
+static struct b2r2_control *b2r2_ctl[B2R2_MAX_NBR_DEVICES];
+
+/* Debug file system support */
+#ifdef CONFIG_DEBUG_FS
+static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size);
+#endif
+
+/* Local functions */
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat);
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat);
+static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
+ int request_id);
+static int b2r2_blt_query_cap(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_query_cap *query_cap);
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+static int b2r2_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request);
+
+static void job_callback(struct b2r2_core_job *job);
+static void job_release(struct b2r2_core_job *job);
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources(struct b2r2_core_job *job, bool atomic);
+#endif
+
+#ifdef CONFIG_B2R2_GENERIC
+static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request);
+
+static void job_callback_gen(struct b2r2_core_job *job);
+static void job_release_gen(struct b2r2_core_job *job);
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic);
+static void tile_job_callback_gen(struct b2r2_core_job *job);
+static void tile_job_release_gen(struct b2r2_core_job *job);
+#endif
+
+
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst, struct b2r2_resolved_buf *resolved);
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf, struct b2r2_resolved_buf *resolved);
+static void sync_buf(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved, bool is_dst,
+ struct b2r2_blt_rect *rect);
+static bool is_report_list_empty(struct b2r2_blt_instance *instance);
+static bool is_synching(struct b2r2_blt_instance *instance);
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect);
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region);
+static int resolve_hwmem(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used, bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf);
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf);
+
+/**
+ * struct sync_args - Data for clean/flush
+ *
+ * @start: Virtual start address
+ * @end: Virtual end address
+ */
+struct sync_args {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * flush_l1_cache_range_curr_cpu() - Cleans and invalidates L1 cache on the
+ * current CPU
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void flush_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_flush_range((void *)sa->start, (void *)sa->end);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * flush_l1_cache_range_all_cpus() - Cleans and invalidates L1 cache on all CPUs
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void flush_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(flush_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
+
+/**
+ * clean_l1_cache_range_curr_cpu() - Cleans L1 cache on current CPU
+ *
+ * Ensures that data is written out from the CPU's L1 cache;
+ * the data will still remain in the cache.
+ *
+ * @arg: Pointer to sync_args structure
+ */
+static inline void clean_l1_cache_range_curr_cpu(void *arg)
+{
+ struct sync_args *sa = (struct sync_args *)arg;
+
+ dmac_map_area((void *)sa->start,
+ (void *)sa->end - (void *)sa->start,
+ DMA_TO_DEVICE);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * clean_l1_cache_range_all_cpus() - Cleans L1 cache on all CPUs
+ *
+ * Ensures that data is written out from all CPUs' L1 caches;
+ * the data will still remain in the caches.
+ *
+ * @sa: Pointer to sync_args structure
+ */
+static void clean_l1_cache_range_all_cpus(struct sync_args *sa)
+{
+ on_each_cpu(clean_l1_cache_range_curr_cpu, sa, 1);
+}
+#endif
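+
+/*
+ * Usage sketch (illustrative only): making a CPU-written source buffer
+ * visible to B2R2 amounts to building a sync_args range and cleaning it
+ * on every CPU:
+ *
+ *	struct sync_args sa;
+ *
+ *	sa.start = (unsigned long)buf_virt;
+ *	sa.end = sa.start + buf_size;
+ *	#ifdef CONFIG_SMP
+ *	clean_l1_cache_range_all_cpus(&sa);
+ *	#else
+ *	clean_l1_cache_range_curr_cpu(&sa);
+ *	#endif
+ *
+ * sync_buf(), declared above, is the driver's real caller of these
+ * helpers; its is_dst argument determines whether a clean is enough or
+ * whether a full flush (clean + invalidate) is needed.
+ */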
+
+/**
+ * b2r2_blt_open - Implements file open on the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * A B2R2 BLT instance is created and stored in the file structure.
+ */
+static int b2r2_blt_open(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_control *cont = filp->private_data;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_open);
+
+ /* Allocate and initialize the instance */
+ instance = (struct b2r2_blt_instance *)
+ kmalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance) {
+		b2r2_log_err(cont->dev, "%s: Failed to alloc\n", __func__);
+		ret = -ENOMEM;
+		goto instance_alloc_failed;
+	}
+ memset(instance, 0, sizeof(*instance));
+ INIT_LIST_HEAD(&instance->report_list);
+ mutex_init(&instance->lock);
+ init_waitqueue_head(&instance->report_list_waitq);
+ init_waitqueue_head(&instance->synch_done_waitq);
+ instance->control = cont;
+
+ /*
+ * Remember the instance so that we can retrieve it in
+ * other functions
+ */
+ filp->private_data = instance;
+ goto out;
+
+instance_alloc_failed:
+out:
+ dec_stat(cont, &cont->stat_n_in_open);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_release - Implements last close on an instance of
+ * the b2r2_blt device
+ *
+ * @inode: File system inode
+ * @filp: File pointer
+ *
+ * All active jobs are finished or cancelled and allocated data
+ * is released.
+ */
+static int b2r2_blt_release(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_release);
+
+ /* Finish all outstanding requests */
+ ret = b2r2_blt_synch(instance, 0);
+ if (ret < 0)
+		b2r2_log_warn(cont->dev, "%s: b2r2_blt_synch failed with %d\n",
+ __func__, ret);
+
+ /* Now cancel any remaining outstanding request */
+ if (instance->no_of_active_requests) {
+ struct b2r2_core_job *job;
+
+ b2r2_log_warn(cont->dev, "%s: %d active requests\n", __func__,
+ instance->no_of_active_requests);
+
+ /* Find and cancel all jobs belonging to us */
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
+ while (job) {
+ b2r2_core_job_cancel(job);
+ /* Matches addref in b2r2_core_job_find... */
+ b2r2_core_job_release(job, __func__);
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
+ }
+
+ b2r2_log_warn(cont->dev, "%s: %d active requests after "
+ "cancel\n", __func__, instance->no_of_active_requests);
+ }
+
+ /* Release jobs in report list */
+ mutex_lock(&instance->lock);
+ while (!list_empty(&instance->report_list)) {
+ struct b2r2_blt_request *request = list_first_entry(
+ &instance->report_list,
+ struct b2r2_blt_request,
+ list);
+ list_del_init(&request->list);
+ mutex_unlock(&instance->lock);
+ /*
+ * This release matches the addref when the job was put into
+ * the report list
+ */
+ b2r2_core_job_release(&request->job, __func__);
+ mutex_lock(&instance->lock);
+ }
+ mutex_unlock(&instance->lock);
+
+ /* Release our instance */
+ kfree(instance);
+
+ dec_stat(cont, &cont->stat_n_in_release);
+
+ return 0;
+}
+
+/**
+ * b2r2_blt_ioctl - This routine implements b2r2_blt ioctl interface
+ *
+ * @file: file pointer
+ * @cmd: ioctl command
+ * @arg: input argument for the ioctl
+ *
+ * Returns 0 if OK else negative error code
+ */
+static long b2r2_blt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) file->private_data;
+ struct b2r2_control *cont = instance->control;
+
+	/* Process the actual ioctl */
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+	switch (cmd) {
+ case B2R2_BLT_IOC: {
+ /* This is the "blit" command */
+
+		/* arg is a user-space pointer to a struct b2r2_blt_req */
+ struct b2r2_blt_request *request =
+ kmalloc(sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ b2r2_log_err(cont->dev, "%s: Failed to alloc mem\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Initialize the structure */
+ memset(request, 0, sizeof(*request));
+ INIT_LIST_HEAD(&request->list);
+ request->instance = instance;
+
+ /*
+ * The user request is a sub structure of the
+ * kernel request structure.
+ */
+
+ /* Get the user data */
+ if (copy_from_user(&request->user_req, (void *)arg,
+ sizeof(request->user_req))) {
+ b2r2_log_err(cont->dev, "%s: copy_from_user failed\n",
+ __func__);
+ kfree(request);
+ return -EFAULT;
+ }
+
+ if (!b2r2_validate_user_req(cont, &request->user_req)) {
+ kfree(request);
+ return -EINVAL;
+ }
+
+ request->profile = is_profiler_registered_approx();
+
+ /*
+ * If the user specified a color look-up table,
+ * make a copy that the HW can use.
+ */
+ if ((request->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ request->clut = dma_alloc_coherent(cont->dev,
+ CLUT_SIZE, &(request->clut_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (request->clut == NULL) {
+ b2r2_log_err(cont->dev, "%s CLUT allocation "
+ "failed.\n", __func__);
+ kfree(request);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(request->clut,
+ request->user_req.clut, CLUT_SIZE)) {
+ b2r2_log_err(cont->dev, "%s: CLUT "
+ "copy_from_user failed\n",
+ __func__);
+ dma_free_coherent(cont->dev, CLUT_SIZE,
+ request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ kfree(request);
+ return -EFAULT;
+ }
+ }
+
+ /* Perform the blit */
+
+#ifdef CONFIG_B2R2_GENERIC_ONLY
+ /* Use the generic path for all operations */
+ ret = b2r2_generic_blt(instance, request);
+#else
+ /* Use the optimized path */
+ ret = b2r2_blt(instance, request);
+#endif
+
+#ifdef CONFIG_B2R2_GENERIC_FALLBACK
+ /* Fall back to generic path if operation was not supported */
+ if (ret == -ENOSYS) {
+ struct b2r2_blt_request *request_gen;
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
+ /* No support for BG BLEND in generic
+ * implementation yet */
+ b2r2_log_warn(cont->dev, "%s: Unsupported: "
+ "Background blend in b2r2_generic_blt\n",
+ __func__);
+ return ret;
+ }
+
+ b2r2_log_info(cont->dev,
+ "b2r2_blt=%d Going generic.\n", ret);
+ request_gen = kmalloc(sizeof(*request_gen), GFP_KERNEL);
+ if (!request_gen) {
+ b2r2_log_err(cont->dev,
+ "%s: Failed to alloc mem for "
+ "request_gen\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* Initialize the structure */
+ memset(request_gen, 0, sizeof(*request_gen));
+ INIT_LIST_HEAD(&request_gen->list);
+ request_gen->instance = instance;
+
+ /*
+ * The user request is a sub structure of the
+ * kernel request structure.
+ */
+
+ /* Get the user data */
+ if (copy_from_user(&request_gen->user_req, (void *)arg,
+ sizeof(request_gen->user_req))) {
+ b2r2_log_err(cont->dev, "%s: copy_from_user "
+ "failed\n", __func__);
+ kfree(request_gen);
+ return -EFAULT;
+ }
+
+ /*
+ * If the user specified a color look-up table,
+ * make a copy that the HW can use.
+ */
+ if ((request_gen->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION)
+ != 0) {
+ request_gen->clut = dma_alloc_coherent(
+ cont->dev, CLUT_SIZE,
+ &(request_gen->clut_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (request_gen->clut == NULL) {
+ b2r2_log_err(cont->dev, "%s CLUT "
+ "allocation failed.\n",
+ __func__);
+ kfree(request_gen);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(request_gen->clut,
+ request_gen->user_req.clut,
+ CLUT_SIZE)) {
+ b2r2_log_err(cont->dev, "%s: CLUT"
+ " copy_from_user failed\n",
+ __func__);
+ dma_free_coherent(cont->dev, CLUT_SIZE,
+ request_gen->clut,
+ request_gen->clut_phys_addr);
+ request_gen->clut = NULL;
+ request_gen->clut_phys_addr = 0;
+ kfree(request_gen);
+ return -EFAULT;
+ }
+ }
+
+ request_gen->profile = is_profiler_registered_approx();
+
+ ret = b2r2_generic_blt(instance, request_gen);
+ b2r2_log_info(cont->dev, "\nb2r2_generic_blt=%d "
+ "Generic done.\n", ret);
+ }
+#endif /* CONFIG_B2R2_GENERIC_FALLBACK */
+
+ break;
+ }
+
+ case B2R2_BLT_SYNCH_IOC:
+ /* arg is request_id */
+ ret = b2r2_blt_synch(instance, (int) arg);
+ break;
+
+ case B2R2_BLT_QUERY_CAP_IOC:
+ {
+ /* Arg is struct b2r2_blt_query_cap */
+ struct b2r2_blt_query_cap query_cap;
+
+ /* Get the user data */
+ if (copy_from_user(&query_cap, (void *)arg,
+ sizeof(query_cap))) {
+ b2r2_log_err(cont->dev, "%s: copy_from_user failed\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ /* Fill in our capabilities */
+ ret = b2r2_blt_query_cap(instance, &query_cap);
+
+ /* Return data to user */
+ if (copy_to_user((void *)arg, &query_cap,
+ sizeof(query_cap))) {
+ b2r2_log_err(cont->dev, "%s: copy_to_user failed\n",
+ __func__);
+ return -EFAULT;
+ }
+ break;
+ }
+
+ default:
+ /* Unknown command */
+ b2r2_log_err(cont->dev, "%s: Unknown cmd %d\n", __func__, cmd);
+ ret = -EINVAL;
+ break;
+
+ }
+
+ if (ret < 0)
+		b2r2_log_err(cont->dev, "%s: Failed with error %d\n",
+			__func__, -ret);
+
+ return ret;
+}
+
+/**
+ * b2r2_blt_poll - Support for user-space poll, select & epoll.
+ * Used for user-space callback
+ *
+ * @filp: File to poll on
+ * @wait: Poll table to wait on
+ *
+ * This function checks whether there is anything to read
+ */
+static unsigned b2r2_blt_poll(struct file *filp, poll_table *wait)
+{
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+ unsigned int mask = 0;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ poll_wait(filp, &instance->report_list_waitq, wait);
+ mutex_lock(&instance->lock);
+ if (!list_empty(&instance->report_list))
+ mask |= POLLIN | POLLRDNORM;
+ mutex_unlock(&instance->lock);
+
+ return mask;
+}
+
+/**
+ * b2r2_blt_read - Reads report data, used for the user-space callback
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ int ret = 0;
+ struct b2r2_blt_request *request = NULL;
+ struct b2r2_blt_report report;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /*
+ * We return only complete report records, one at a time.
+ * Might be more efficient to support read of many.
+ */
+ count = (count / sizeof(struct b2r2_blt_report)) *
+ sizeof(struct b2r2_blt_report);
+ if (count > sizeof(struct b2r2_blt_report))
+ count = sizeof(struct b2r2_blt_report);
+ if (count == 0)
+ return count;
+
+ /*
+ * Loop and wait here until we have anything to return or
+ * until interrupted
+ */
+ mutex_lock(&instance->lock);
+ while (list_empty(&instance->report_list)) {
+ mutex_unlock(&instance->lock);
+
+ /* Return if non blocking read */
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ b2r2_log_info(cont->dev, "%s - Going to sleep\n", __func__);
+ if (wait_event_interruptible(
+ instance->report_list_waitq,
+ !is_report_list_empty(instance)))
+ /* signal: tell the fs layer to handle it */
+ return -ERESTARTSYS;
+
+		/* Otherwise loop, but first reacquire the lock */
+ mutex_lock(&instance->lock);
+ }
+
+ if (!list_empty(&instance->report_list))
+ request = list_first_entry(
+ &instance->report_list, struct b2r2_blt_request, list);
+
+ if (request) {
+ /* Remove from list to avoid reading twice */
+ list_del_init(&request->list);
+
+ report.request_id = request->request_id;
+ report.report1 = request->user_req.report1;
+ report.report2 = request->user_req.report2;
+ report.usec_elapsed = 0; /* TBD */
+
+ mutex_unlock(&instance->lock);
+ if (copy_to_user(buf, &report, sizeof(report)))
+ ret = -EFAULT;
+ mutex_lock(&instance->lock);
+
+ if (ret < 0) {
+ /* copy to user failed, re-insert into list */
+ list_add(&request->list,
+ &request->instance->report_list);
+ request = NULL;
+ }
+ }
+ mutex_unlock(&instance->lock);
+
+ if (request)
+ /*
+ * Release matching the addref when the job was put into
+ * the report list
+ */
+ b2r2_core_job_release(&request->job, __func__);
+
+	return ret < 0 ? ret : count;
+}
+
+/**
+ * b2r2_blt_fops - File operations for b2r2_blt
+ */
+static const struct file_operations b2r2_blt_fops = {
+ .owner = THIS_MODULE,
+ .open = b2r2_blt_open,
+ .release = b2r2_blt_release,
+ .unlocked_ioctl = b2r2_blt_ioctl,
+ .poll = b2r2_blt_poll,
+ .read = b2r2_blt_read,
+};
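+
+/*
+ * User-space usage sketch (illustrative only; assumes the uapi
+ * definitions of B2R2_BLT_IOC, struct b2r2_blt_req and struct
+ * b2r2_blt_report from the blt API header):
+ *
+ *	int fd = open("/dev/b2r2_blt", O_RDWR);
+ *	struct b2r2_blt_req req = {0};
+ *	... fill in req.src_img, req.dst_img and the rects ...
+ *	req.flags = B2R2_BLT_FLAG_ASYNCH | B2R2_BLT_FLAG_REPORT_WHEN_DONE;
+ *	int request_id = ioctl(fd, B2R2_BLT_IOC, &req);
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	poll(&pfd, 1, -1);
+ *	struct b2r2_blt_report report;
+ *	read(fd, &report, sizeof(report));
+ *	... report.request_id now matches request_id ...
+ */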
+
+#ifndef CONFIG_B2R2_GENERIC_ONLY
+/**
+ * b2r2_blt - Implementation of the B2R2 blit request
+ *
+ * @instance: The B2R2 BLT instance
+ * @request: The request to perform
+ */
+static int b2r2_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+ struct b2r2_control *cont = instance->control;
+
+ u32 thread_runtime_at_start = 0;
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(cont->dev,
+ "src.fmt=%#010x src.buf={%d,%d,%d} "
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev,
+ "bg.fmt=%#010x bg.buf={%d,%d,%d} "
+ "bg.w,h={%d,%d} bg.rect={%d,%d,%d,%d}\n",
+ request->user_req.bg_img.fmt,
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ b2r2_log_info(cont->dev,
+ "dst.fmt=%#010x dst.buf={%d,%d,%d} "
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(cont, &request->user_req.src_img,
+ &request->user_req.src_rect,
+ false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Background buffer */
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
+ ret = resolve_buf(cont, &request->user_req.bg_img,
+ &request->user_req.bg_rect,
+ false, &request->bg_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve bg buf failed,"
+ " %d\n", __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_bg_buf_failed;
+ }
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(cont, &request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src mask buf failed,"
+ " %d\n", __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev, "bg.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->bg_resolved.physical_address,
+ request->bg_resolved.virtual_address,
+ request->bg_resolved.is_pmem,
+ request->bg_resolved.filep,
+ request->bg_resolved.file_physical_start,
+ request->bg_resolved.file_virtual_start,
+ request->bg_resolved.file_len);
+
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_node_split_analyze(request, MAX_TMP_BUF_SIZE, &node_count,
+ &request->bufs, &request->buf_count,
+ &request->node_split_job);
+ if (ret == -ENOSYS) {
+ /* There was no optimized path for this request */
+ b2r2_log_info(cont->dev, "%s: No optimized path for request\n",
+ __func__);
+ goto no_optimized_path;
+
+ } else if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to analyze request,"
+ " ret = %d\n", __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+			char *buf = kmalloc(4096, GFP_KERNEL);
+
+			b2r2_log_info(cont->dev, "%s: Analyze failed for:\n",
+				__func__);
+			if (buf != NULL) {
+				sprintf_req(request, buf, 4096);
+				b2r2_log_info(cont->dev, "%s", buf);
+				kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the"
+ " request. Message buffer"
+ " allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(cont,
+ node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Failed to allocate nodes,"
+ " ret = %d\n", __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Build the B2R2 node list */
+ ret = b2r2_node_split_configure(cont, &request->node_split_job,
+ request->first_node);
+
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s:"
+ " Failed to perform node split, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+
+ /* Exit here if dry run */
+ if (request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /* Configure the request */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback;
+ request->job.release = job_release;
+ request->job.acquire_resources = job_acquire_resources;
+ request->job.release_resources = job_release_resources;
+
+ /* Synchronize memory occupied by the buffers */
+
+ /* Source buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
+ &request->src_resolved, false,
+ &request->user_req.src_rect);
+
+ /* Background buffer */
+ if ((request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) &&
+ !(request->user_req.flags &
+ B2R2_BLT_FLAG_BG_NO_CACHE_FLUSH) &&
+ (request->user_req.bg_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.bg_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.bg_img,
+ &request->bg_resolved, false,
+ &request->user_req.bg_rect);
+
+ /* Source mask buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
+ &request->src_mask_resolved, false, NULL);
+
+ /* Destination buffer */
+ if (!(request->user_req.flags &
+ B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
+ &request->dst_resolved, true,
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request for debugfs */
+ cont->debugfs_latest_request = *request;
+#endif
+
+ /* Submit the job */
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request->profile)
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+
+ mutex_lock(&instance->lock);
+
+ /* Add the job to b2r2_core */
+ request_id = b2r2_core_job_add(cont, &request->job);
+ request->request_id = request_id;
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to add job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+
+ /* Wait for the job to be done if synchronous */
+ if ((request->user_req.flags & B2R2_BLT_FLAG_ASYNCH) == 0) {
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(&request->job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev, "%s: Failed to wait job,"
+ " ret = %d\n", __func__, ret);
+ else
+ b2r2_log_info(cont->dev, "%s: Synchronous wait done\n",
+ __func__);
+ ret = 0;
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add,
+ * the request must not be accessed after this call
+ */
+ b2r2_core_job_release(&request->job, __func__);
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ return ret >= 0 ? request_id : ret;
+
+job_add_failed:
+exit_dry_run:
+no_optimized_path:
+generate_nodes_failed:
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
+ &request->bg_resolved);
+resolve_bg_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+ job_release(&request->job);
+ dec_stat(cont, &cont->stat_n_jobs_released);
+ if ((request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) == 0 || ret)
+ b2r2_log_warn(cont->dev, "%s returns with error %d\n",
+ __func__, ret);
+
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ return ret;
+}
+
+/**
+ * Called when job is done or cancelled
+ *
+ * @job: The job
+ */
+static void job_callback(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ if (cont->dev)
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
+ &request->bg_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ mutex_lock(&request->instance->lock);
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /* Add a reference because we put the job in the report list */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease number of active requests and wake up
+ * synching threads if active requests reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+		char *buf = kmalloc(4096, GFP_KERNEL);
+
+		b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
+		if (buf != NULL) {
+			sprintf_req(request, buf, 4096);
+			b2r2_log_info(cont->dev, "%s", buf);
+			kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ if (request->profile) {
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() - request->start_time_nsec);
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ b2r2_node_split_cancel(cont, &request->node_split_job);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
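+
+/*
+ * Reference counting summary (as used above, illustrative): the addref
+ * taken by b2r2_core_job_add() is dropped by b2r2_blt() once the job is
+ * submitted (and waited for, if synchronous); putting a finished job on
+ * the report list takes another reference, dropped by b2r2_blt_read()
+ * or b2r2_blt_release(); callbacks bracket their own work with a local
+ * addref/release pair. job_release() above runs when the last reference
+ * disappears.
+ */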
+
+/**
+ * Tells the job to try to allocate the resources needed to execute the job.
+ * Called just before execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context. If function
+ * can't allocate in atomic context it should return error, it
+ * will then be called later from non-atomic context.
+ */
+static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+ int ret;
+ int i;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (request->buf_count == 0)
+ return 0;
+
+ if (request->buf_count > MAX_TMP_BUFS_NEEDED) {
+ b2r2_log_err(cont->dev,
+ "%s: request->buf_count > MAX_TMP_BUFS_NEEDED\n",
+ __func__);
+ return -ENOMSG;
+ }
+
+ /*
+ * 1 to 1 mapping between request temp buffers and temp buffers
+ * (request temp buf 0 is always temp buf 0, request temp buf 1 is
+ * always temp buf 1 and so on) to avoid starvation of jobs that
+	 * require multiple temp buffers. Not optimal in terms of memory
+	 * usage, but it avoids getting into a situation where lower prio
+	 * jobs can delay higher prio jobs that require more temp buffers.
+ */
+ if (cont->tmp_bufs[0].in_use)
+ return -EAGAIN;
+
+ for (i = 0; i < request->buf_count; i++) {
+ if (cont->tmp_bufs[i].buf.size < request->bufs[i].size) {
+ b2r2_log_err(cont->dev, "%s: "
+ "cont->tmp_bufs[i].buf.size < "
+ "request->bufs[i].size\n", __func__);
+ ret = -ENOMSG;
+ goto error;
+ }
+
+ cont->tmp_bufs[i].in_use = true;
+ request->bufs[i].phys_addr = cont->tmp_bufs[i].buf.phys_addr;
+ request->bufs[i].virt_addr = cont->tmp_bufs[i].buf.virt_addr;
+
+ b2r2_log_info(cont->dev, "%s: phys=%p, virt=%p\n",
+ __func__, (void *)request->bufs[i].phys_addr,
+ request->bufs[i].virt_addr);
+
+ ret = b2r2_node_split_assign_buffers(cont,
+ &request->node_split_job,
+ request->first_node, request->bufs,
+ request->buf_count);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < request->buf_count; i++)
+ cont->tmp_bufs[i].in_use = false;
+
+ return ret;
+}
+
+/**
+ * Tells the job to free the resources needed to execute the job.
+ * Called after execution of a job.
+ *
+ * @job: The job
+ * @atomic: true if called from atomic (i.e. interrupt) context. If function
+ * can't allocate in atomic context it should return error, it
+ * will then be called later from non-atomic context.
+ */
+static void job_release_resources(struct b2r2_core_job *job, bool atomic)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+ int i;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Free any temporary buffers */
+ for (i = 0; i < request->buf_count; i++) {
+
+ b2r2_log_info(cont->dev, "%s: freeing %d bytes\n",
+ __func__, request->bufs[i].size);
+ cont->tmp_bufs[i].in_use = false;
+ memset(&request->bufs[i], 0, sizeof(request->bufs[i]));
+ }
+ request->buf_count = 0;
+
+ /*
+ * Early release of nodes
+ * FIXME: If nodes are to be reused we don't want to release here
+ */
+ if (!atomic && request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ request->first_node = NULL;
+ }
+}
+
+#endif /* !CONFIG_B2R2_GENERIC_ONLY */
+
+#ifdef CONFIG_B2R2_GENERIC
+/**
+ * Called when job for one tile is done or cancelled
+ * in the generic path.
+ *
+ * @job: The job
+ */
+static void tile_job_callback_gen(struct b2r2_core_job *job)
+{
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Notify if a tile job is cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED)
+ b2r2_log_info(cont->dev, "%s: Tile job cancelled:\n",
+ __func__);
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when job is done or cancelled.
+ * Used for the last tile in the generic path
+ * to notify waiting clients.
+ *
+ * @job: The job
+ */
+static void job_callback_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Local addref / release within this func */
+ b2r2_core_job_addref(job, __func__);
+
+ /* Unresolve the buffers */
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+
+ /* Move to report list if the job shall be reported */
+ /* FIXME: Use a smaller struct? */
+ mutex_lock(&request->instance->lock);
+
+ if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) {
+ /* Move job to report list */
+ list_add_tail(&request->list,
+ &request->instance->report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
+
+ /* Wake up poll */
+ wake_up_interruptible(
+ &request->instance->report_list_waitq);
+
+ /*
+ * Add a reference because we put the
+ * job in the report list
+ */
+ b2r2_core_job_addref(job, __func__);
+ }
+
+ /*
+ * Decrease number of active requests and wake up
+ * synching threads if active requests reaches zero
+ */
+ BUG_ON(request->instance->no_of_active_requests == 0);
+ request->instance->no_of_active_requests--;
+ if (request->instance->synching &&
+ request->instance->no_of_active_requests == 0) {
+ request->instance->synching = false;
+ /* Wake up all syncing */
+
+ wake_up_interruptible_all(
+ &request->instance->synch_done_waitq);
+ }
+ mutex_unlock(&request->instance->lock);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Dump job if cancelled */
+ if (job->job_state == B2R2_CORE_JOB_CANCELED) {
+		char *buf = kmalloc(4096, GFP_KERNEL);
+
+		b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
+		if (buf != NULL) {
+			sprintf_req(request, buf, 4096);
+			b2r2_log_info(cont->dev, "%s", buf);
+			kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
+ }
+ }
+#endif
+
+ /* Local addref / release within this func */
+ b2r2_core_job_release(job, __func__);
+}
+
+/**
+ * Called when tile job should be released (free memory etc.)
+ * Should be used only for tile jobs. Tile jobs should only be used
+ * by b2r2_core, thus making ref_count trigger their release.
+ *
+ * @job: The job
+ */
+static void tile_job_release_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_control *cont = instance->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node_address=0x%.8x, ref_count="
+ "%d\n", __func__, job->first_node_address,
+ job->ref_count);
+
+ /* Release memory for the job */
+ kfree(job);
+}
+
+/**
+ * Called when job should be released (free memory etc.)
+ *
+ * @job: The job
+ */
+static void job_release_gen(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_request *request =
+ container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
+
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
+
+ if (request->first_node) {
+ b2r2_debug_job_done(cont, request->first_node);
+
+ /* Free nodes */
+#ifdef B2R2_USE_NODE_GEN
+ b2r2_blt_free_nodes(cont, request->first_node);
+#else
+ b2r2_node_free(cont, request->first_node);
+#endif
+ }
+
+ /* Release memory for the request */
+ if (request->clut != NULL) {
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
+ request->clut_phys_addr);
+ request->clut = NULL;
+ request->clut_phys_addr = 0;
+ }
+ kfree(request);
+}
+
+static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+ return 0;
+}
+
+static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic)
+{
+ /* Nothing so far. Temporary buffers are pre-allocated */
+}
+
+/**
+ * b2r2_generic_blt - Generic implementation of the B2R2 blit request
+ *
+ * @instance: The B2R2 BLT instance
+ * @request: The request to perform
+ */
+static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_request *request)
+{
+ int ret = 0;
+ struct b2r2_blt_rect actual_dst_rect;
+ int request_id = 0;
+ struct b2r2_node *last_node = request->first_node;
+ int node_count;
+ s32 tmp_buf_width = 0;
+ s32 tmp_buf_height = 0;
+ u32 tmp_buf_count = 0;
+ s32 x;
+ s32 y;
+ const struct b2r2_blt_rect *dst_rect = &(request->user_req.dst_rect);
+ const s32 dst_img_width = request->user_req.dst_img.width;
+ const s32 dst_img_height = request->user_req.dst_img.height;
+ const enum b2r2_blt_flag flags = request->user_req.flags;
+ /* Descriptors for the temporary buffers */
+ struct b2r2_work_buf work_bufs[4];
+ struct b2r2_blt_rect dst_rect_tile;
+ int i;
+ struct b2r2_control *cont = instance->control;
+
+ u32 thread_runtime_at_start = 0;
+ s32 nsec_active_in_b2r2 = 0;
+
+ /*
+ * Early exit if zero blt.
+ * dst_rect outside of dst_img or
+ * dst_clip_rect outside of dst_img.
+ */
+ if (dst_rect->x + dst_rect->width <= 0 ||
+ dst_rect->y + dst_rect->height <= 0 ||
+ dst_img_width <= dst_rect->x ||
+ dst_img_height <= dst_rect->y ||
+ ((flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0 &&
+ (dst_img_width <= request->user_req.dst_clip_rect.x ||
+ dst_img_height <= request->user_req.dst_clip_rect.y ||
+ request->user_req.dst_clip_rect.x +
+ request->user_req.dst_clip_rect.width <= 0 ||
+ request->user_req.dst_clip_rect.y +
+ request->user_req.dst_clip_rect.height <= 0))) {
+ goto zero_blt;
+ }
+
+ if (request->profile) {
+ request->start_time_nsec = b2r2_get_curr_nsec();
+ thread_runtime_at_start = (u32)task_sched_runtime(current);
+ }
+
+ memset(work_bufs, 0, sizeof(work_bufs));
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt);
+
+ /* Debug prints of incoming request */
+ b2r2_log_info(cont->dev,
+ "src.fmt=%#010x flags=0x%.8x src.buf={%d,%d,0x%.8x}\n"
+ "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
+ request->user_req.src_img.fmt,
+ request->user_req.flags,
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ b2r2_log_info(cont->dev,
+ "dst.fmt=%#010x dst.buf={%d,%d,0x%.8x}\n"
+ "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n"
+ "dst_clip_rect={%d,%d,%d,%d}\n",
+ request->user_req.dst_img.fmt,
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height,
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Wait here if synch is ongoing */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+ goto synch_interrupted;
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
+
+ /* Resolve the buffers */
+
+ /* Source buffer */
+ ret = resolve_buf(cont, &request->user_req.src_img,
+ &request->user_req.src_rect, false, &request->src_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_buf_failed;
+ }
+
+ /* Source mask buffer */
+ ret = resolve_buf(cont, &request->user_req.src_mask,
+ &request->user_req.src_rect, false,
+ &request->src_mask_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Resolve src mask buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_src_mask_buf_failed;
+ }
+
+ /* Destination buffer */
+ get_actual_dst_rect(&request->user_req, &actual_dst_rect);
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
+ __func__, ret);
+ ret = -EAGAIN;
+ goto resolve_dst_buf_failed;
+ }
+
+ /* Debug prints of resolved buffers */
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->src_resolved.physical_address,
+ request->src_resolved.virtual_address,
+ request->src_resolved.is_pmem,
+ request->src_resolved.filep,
+ request->src_resolved.file_physical_start,
+ request->src_resolved.file_virtual_start,
+ request->src_resolved.file_len);
+
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ request->dst_resolved.physical_address,
+ request->dst_resolved.virtual_address,
+ request->dst_resolved.is_pmem,
+ request->dst_resolved.filep,
+ request->dst_resolved.file_physical_start,
+ request->dst_resolved.file_virtual_start,
+ request->dst_resolved.file_len);
+
+ /* Calculate the number of nodes (and resources) needed for this job */
+ ret = b2r2_generic_analyze(request, &tmp_buf_width,
+ &tmp_buf_height, &tmp_buf_count, &node_count);
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to analyze request, ret = %d\n",
+ __func__, ret);
+#ifdef CONFIG_DEBUG_FS
+ {
+ /* Failed, dump job to dmesg */
+			char *buf = kmalloc(4096, GFP_KERNEL);
+
+			b2r2_log_info(cont->dev,
+				"%s: Analyze failed for:\n", __func__);
+			if (buf != NULL) {
+				sprintf_req(request, buf, 4096);
+				b2r2_log_info(cont->dev, "%s", buf);
+				kfree(buf);
+ } else {
+ b2r2_log_info(cont->dev,
+ "Unable to print the request. "
+ "Message buffer allocation failed.\n");
+ }
+ }
+#endif
+ goto generate_nodes_failed;
+ }
+
+ /* Allocate the nodes needed */
+#ifdef B2R2_USE_NODE_GEN
+ request->first_node = b2r2_blt_alloc_nodes(cont, node_count);
+ if (request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#else
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
+ if (ret < 0 || request->first_node == NULL) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to allocate nodes, ret = %d\n",
+ __func__, ret);
+ goto generate_nodes_failed;
+ }
+#endif
+
+ /* Allocate the temporary buffers */
+ for (i = 0; i < tmp_buf_count; i++) {
+ void *virt;
+ work_bufs[i].size = tmp_buf_width * tmp_buf_height * 4;
+
+ virt = dma_alloc_coherent(cont->dev,
+ work_bufs[i].size,
+ &(work_bufs[i].phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (virt == NULL) {
+ ret = -ENOMEM;
+ goto alloc_work_bufs_failed;
+ }
+
+ work_bufs[i].virt_addr = virt;
+ memset(work_bufs[i].virt_addr, 0xff, work_bufs[i].size);
+ }
+ ret = b2r2_generic_configure(request,
+ request->first_node, &work_bufs[0], tmp_buf_count);
+
+ if (ret < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to perform generic configure, ret = %d\n",
+ __func__, ret);
+ goto generic_conf_failed;
+ }
+
+ /* Exit here if dry run */
+ if (flags & B2R2_BLT_FLAG_DRY_RUN)
+ goto exit_dry_run;
+
+ /*
+ * Configure the request and make sure
+ * that its job is run only for the LAST tile.
+ * This is when the request is complete
+ * and waiting clients should be notified.
+ */
+ last_node = request->first_node;
+ while (last_node && last_node->next)
+ last_node = last_node->next;
+
+ request->job.tag = (int) instance;
+ request->job.prio = request->user_req.prio;
+ request->job.first_node_address =
+ request->first_node->physical_address;
+ request->job.last_node_address =
+ last_node->physical_address;
+ request->job.callback = job_callback_gen;
+ request->job.release = job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ request->job.acquire_resources = job_acquire_resources_gen;
+ request->job.release_resources = job_release_resources_gen;
+
+ /* Flush the L1/L2 cache for the buffers */
+
+ /* Source buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) &&
+ (request->user_req.src_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
+ &request->src_resolved,
+ false, /*is_dst*/
+ &request->user_req.src_rect);
+
+ /* Source mask buffer */
+ if (!(flags & B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) &&
+ (request->user_req.src_mask.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
+ &request->src_mask_resolved,
+ false, /*is_dst*/
+ NULL);
+
+ /* Destination buffer */
+ if (!(flags & B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) &&
+ (request->user_req.dst_img.buf.type !=
+ B2R2_BLT_PTR_PHYSICAL) &&
+ !b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
+ &request->dst_resolved,
+ true, /*is_dst*/
+ &request->user_req.dst_rect);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Remember latest request */
+ cont->debugfs_latest_request = *request;
+#endif
+
+ /*
+ * Same nodes are reused for all the jobs needed to complete the blit.
+ * Nodes are NOT released together with associated job,
+ * as is the case with optimized b2r2_blt() path.
+ */
+ mutex_lock(&instance->lock);
+ instance->no_of_active_requests++;
+ mutex_unlock(&instance->lock);
+ /*
+ * Process all but the last row in the destination rectangle.
+ * Consider only the tiles that will actually end up inside
+ * the destination image.
+	 * dst_rect->height - tmp_buf_height being <= 0 is all right:
+	 * the loop is simply not entered, since y is always greater
+	 * than or equal to zero.
+	 * The early exit check at the beginning handles the cases where
+	 * nothing at all should be processed.
+ */
+ y = 0;
+ if (dst_rect->y < 0)
+ y = -dst_rect->y;
+
+ for (; y < dst_rect->height - tmp_buf_height &&
+ y + dst_rect->y < dst_img_height - tmp_buf_height;
+ y += tmp_buf_height) {
+ /* Tile in the destination rectangle being processed */
+ struct b2r2_blt_rect dst_rect_tile;
+ dst_rect_tile.y = y;
+ dst_rect_tile.width = tmp_buf_width;
+ dst_rect_tile.height = tmp_buf_height;
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width && x + dst_rect->x < dst_img_width;
+ x += tmp_buf_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ */
+ struct b2r2_core_job *tile_job =
+ kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ /*
+ * Skip this tile. Do not abort,
+ * just hope for better luck
+				 * with the rest of the tiles.
+ * Memory might become available.
+ */
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
+ continue;
+ }
+ tile_job->tag = request->job.tag;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ /* Work buffers and nodes are pre-allocated */
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width =
+ dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * Where applicable, calculate area in src buffer
+ * that is needed to generate the specified part
+ * of destination rectangle.
+ */
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+ /* Submit the job */
+ b2r2_log_info(cont->dev,
+ "%s: Submitting job\n", __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+
+ request_id = b2r2_core_job_add(cont, tile_job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Failed to add tile job, ret = %d\n",
+ __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+
+ mutex_unlock(&instance->lock);
+
+ /* Wait for the job to be done */
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+
+ ret = b2r2_core_job_wait(tile_job);
+
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(cont->dev,
+ "%s: Synchronous wait done\n",
+ __func__);
+
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ }
+ /* Release matching the addref in b2r2_core_job_add */
+ b2r2_core_job_release(tile_job, __func__);
+ }
+ }
+
+ x = 0;
+ if (dst_rect->x < 0)
+ x = -dst_rect->x;
+
+ for (; x < dst_rect->width &&
+ x + dst_rect->x < dst_img_width; x += tmp_buf_width) {
+ struct b2r2_core_job *tile_job = NULL;
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ /*
+ * Tile jobs are freed by the supplied release function
+ * when ref_count on a tile_job reaches zero.
+ * Do NOT allocate a tile_job for the last tile.
+ * Send the job from the request. This way clients
+ * will be notified when the whole blit is complete
+ * and not just part of it.
+ */
+ tile_job = kmalloc(sizeof(*tile_job), GFP_KERNEL);
+ if (tile_job == NULL) {
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
+ continue;
+ }
+ tile_job->tag = request->job.tag;
+ tile_job->prio = request->job.prio;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
+ tile_job->callback = tile_job_callback_gen;
+ tile_job->release = tile_job_release_gen;
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
+ }
+
+ dst_rect_tile.x = x;
+ if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
+ /*
+ * Only a part of the tile can be written.
+ * Limit imposed by buffer size.
+ */
+ dst_rect_tile.width = dst_img_width - (x + dst_rect->x);
+ } else if (x + tmp_buf_width > dst_rect->width) {
+ /*
+ * Only a part of the tile can be written.
+ * In this case limit imposed by dst_rect size.
+ */
+ dst_rect_tile.width = dst_rect->width - x;
+ } else {
+ /* Whole tile can be written. */
+ dst_rect_tile.width = tmp_buf_width;
+ }
+ /*
+ * y is now the last row. Either because the whole dst_rect
+ * has been processed, or because the last row that will be
+ * written to dst_img has been reached. Limits imposed in
+ * the same way as for width.
+ */
+ dst_rect_tile.y = y;
+ if (y + dst_rect->y + tmp_buf_height > dst_img_height)
+ dst_rect_tile.height =
+ dst_img_height - (y + dst_rect->y);
+ else if (y + tmp_buf_height > dst_rect->height)
+ dst_rect_tile.height = dst_rect->height - y;
+ else
+ dst_rect_tile.height = tmp_buf_height;
+
+ b2r2_generic_set_areas(request,
+ request->first_node, &dst_rect_tile);
+
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
+ inc_stat(cont, &cont->stat_n_in_blt_add);
+
+ mutex_lock(&instance->lock);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ request_id = b2r2_core_job_add(cont, tile_job);
+ } else {
+ /*
+ * Last tile. Send the job-struct from the request.
+ * Clients will be notified once it completes.
+ */
+ request_id = b2r2_core_job_add(cont, &request->job);
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt_add);
+
+ if (request_id < 0) {
+ b2r2_log_warn(cont->dev, "%s: Failed to add tile job, "
+ "ret = %d\n", __func__, request_id);
+ ret = request_id;
+ mutex_unlock(&instance->lock);
+			kfree(tile_job);
+ goto job_add_failed;
+ }
+
+ inc_stat(cont, &cont->stat_n_jobs_added);
+ mutex_unlock(&instance->lock);
+
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
+ __func__);
+
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ ret = b2r2_core_job_wait(tile_job);
+ } else {
+ /*
+ * This is the last tile. Wait for the job-struct from
+ * the request.
+ */
+ ret = b2r2_core_job_wait(&request->job);
+ }
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
+
+ if (ret < 0 && ret != -ENOENT)
+ b2r2_log_warn(cont->dev,
+ "%s: Failed to wait job, ret = %d\n",
+ __func__, ret);
+ else {
+ b2r2_log_info(cont->dev,
+ "%s: Synchronous wait done\n", __func__);
+
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width)
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
+ else
+ nsec_active_in_b2r2 +=
+ request->job.nsec_active_in_hw;
+ }
+
+ /*
+ * Release matching the addref in b2r2_core_job_add.
+ * Make sure that the correct job-struct is released
+ * when the last tile is processed.
+ */
+ if (x + tmp_buf_width < dst_rect->width &&
+ x + dst_rect->x + tmp_buf_width <
+ dst_img_width) {
+ b2r2_core_job_release(tile_job, __func__);
+ } else {
+ /*
+ * Update profiling information before
+ * the request is released together with
+ * its core_job.
+ */
+ if (request->profile) {
+ request->nsec_active_in_cpu =
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
+ request->total_time_nsec =
+ (s32)(b2r2_get_curr_nsec() -
+ request->start_time_nsec);
+ request->job.nsec_active_in_hw =
+ nsec_active_in_b2r2;
+
+ b2r2_call_profiler_blt_done(request);
+ }
+
+ b2r2_core_job_release(&request->job, __func__);
+ }
+ }
+
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ for (i = 0; i < tmp_buf_count; i++) {
+ dma_free_coherent(cont->dev,
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+
+ return request_id;
+
+job_add_failed:
+exit_dry_run:
+generic_conf_failed:
+alloc_work_bufs_failed:
+ for (i = 0; i < 4; i++) {
+		if (work_bufs[i].virt_addr != NULL) {
+ dma_free_coherent(cont->dev,
+ work_bufs[i].size,
+ work_bufs[i].virt_addr,
+ work_bufs[i].phys_addr);
+ memset(&(work_bufs[i]), 0, sizeof(work_bufs[i]));
+ }
+ }
+
+generate_nodes_failed:
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
+ &request->dst_resolved);
+resolve_dst_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
+ &request->src_mask_resolved);
+resolve_src_mask_buf_failed:
+ unresolve_buf(cont, &request->user_req.src_img.buf,
+ &request->src_resolved);
+resolve_src_buf_failed:
+synch_interrupted:
+zero_blt:
+ job_release_gen(&request->job);
+ dec_stat(cont, &cont->stat_n_jobs_released);
+ dec_stat(cont, &cont->stat_n_in_blt);
+
+ b2r2_log_info(cont->dev, "b2r2:%s ret=%d", __func__, ret);
+ return ret;
+}
+#endif /* CONFIG_B2R2_GENERIC */
+
+/**
+ * b2r2_blt_synch - Implements wait for all or a specified job
+ *
+ * @instance: The B2R2 BLT instance
+ * @request_id: If 0, wait for all requests on this instance to finish.
+ * Else wait for request with given request id to finish.
+ */
+static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
+ int request_id)
+{
+ int ret = 0;
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s, request_id=%d\n", __func__, request_id);
+
+ if (request_id == 0) {
+ /* Wait for all requests */
+ inc_stat(cont, &cont->stat_n_in_synch_0);
+
+ /* Enter state "synching" if we have any active request */
+ mutex_lock(&instance->lock);
+ if (instance->no_of_active_requests)
+ instance->synching = true;
+ mutex_unlock(&instance->lock);
+
+ /* Wait until no longer in state synching */
+ ret = wait_event_interruptible(instance->synch_done_waitq,
+ !is_synching(instance));
+ dec_stat(cont, &cont->stat_n_in_synch_0);
+ } else {
+ struct b2r2_core_job *job;
+
+ inc_stat(cont, &cont->stat_n_in_synch_job);
+
+ /* Wait for specific job */
+ job = b2r2_core_job_find(cont, request_id);
+ if (job) {
+			/* Wait for the found job */
+ ret = b2r2_core_job_wait(job);
+ /* Release matching the addref in b2r2_core_job_find */
+ b2r2_core_job_release(job, __func__);
+ }
+
+		/* If the job is not found we assume it has already run */
+ dec_stat(cont, &cont->stat_n_in_synch_job);
+ }
+
+ b2r2_log_info(cont->dev,
+ "%s, request_id=%d, returns %d\n", __func__, request_id, ret);
+
+ return ret;
+}
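+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a caller
+ * holding a b2r2_blt_instance can wait in two ways.
+ *
+ *	ret = b2r2_blt_synch(instance, 0);		wait for all requests
+ *	ret = b2r2_blt_synch(instance, request_id);	wait for one request
+ *
+ * A return value of 0 means the wait completed; a negative value is an
+ * error, e.g. -ERESTARTSYS if the interruptible wait was interrupted.
+ */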
+
+/**
+ * b2r2_blt_query_cap() - Query B2R2 capabilities
+ *
+ * @instance: The B2R2 BLT instance
+ * @query_cap: The structure receiving the capabilities
+ */
+static int b2r2_blt_query_cap(struct b2r2_blt_instance *instance,
+ struct b2r2_blt_query_cap *query_cap)
+{
+ /* FIXME: Not implemented yet */
+ return -ENOSYS;
+}
+
+static void get_actual_dst_rect(struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *actual_dst_rect)
+{
+ struct b2r2_blt_rect dst_img_bounds;
+
+ b2r2_get_img_bounding_rect(&req->dst_img, &dst_img_bounds);
+
+ b2r2_intersect_rects(&req->dst_rect, &dst_img_bounds, actual_dst_rect);
+
+ if (req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP)
+ b2r2_intersect_rects(actual_dst_rect, &req->dst_clip_rect,
+ actual_dst_rect);
+}
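+
+/*
+ * Worked example (hypothetical numbers): with a 100x100 dst_img and
+ * dst_rect = {x=-10, y=0, width=50, height=50}, intersecting with the
+ * image bounds {0, 0, 100, 100} gives an actual dst rect of
+ * {0, 0, 40, 50}. A dst_clip_rect (when B2R2_BLT_FLAG_DESTINATION_CLIP
+ * is set) can only shrink this rect further.
+ */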
+
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region)
+{
+ s32 img_size;
+
+ memset(region, 0, sizeof(*region));
+
+ if (b2r2_is_zero_area_rect(rect))
+ return;
+
+ img_size = b2r2_get_img_size(cont, img);
+
+ if (b2r2_is_single_plane_fmt(img->fmt) &&
+ b2r2_is_independent_pixel_fmt(img->fmt)) {
+ int img_fmt_bpp = b2r2_get_fmt_bpp(cont, img->fmt);
+ u32 img_pitch = b2r2_get_img_pitch(cont, img);
+
+ region->offset = (u32)(img->buf.offset + (rect->y *
+ img_pitch));
+ region->count = (u32)rect->height;
+ region->start = (u32)((rect->x * img_fmt_bpp) / 8);
+ region->end = (u32)b2r2_div_round_up(
+ (rect->x + rect->width) * img_fmt_bpp, 8);
+ region->size = img_pitch;
+ } else {
+ /*
+ * TODO: Locking entire buffer as a quick safe solution. In the
+		 * future we should lock less to avoid unnecessary cache
+ * synching. Pixel interleaved YCbCr formats should be quite
+ * easy, just align start and stop points on 2.
+ */
+ region->offset = (u32)img->buf.offset;
+ region->count = 1;
+ region->start = 0;
+ region->end = (u32)img_size;
+ region->size = (u32)img_size;
+ }
+}
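+
+/*
+ * Worked example (hypothetical numbers) for the single-plane branch
+ * above: a 16 bpp RGB565 image with a 200 byte pitch and
+ * rect = {x=10, y=20, width=30, height=40} gives
+ *
+ *	region->offset = img->buf.offset + 20 * 200
+ *	region->count  = 40 rows
+ *	region->start  = (10 * 16) / 8                       = 20 bytes
+ *	region->end    = b2r2_div_round_up((10 + 30) * 16, 8) = 80 bytes
+ *	region->size   = 200 (the pitch)
+ *
+ * i.e. bytes [20, 80) of each of the 40 rows are covered.
+ */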
+
+static int resolve_hwmem(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf)
+{
+ int return_value = 0;
+ enum hwmem_mem_type mem_type;
+ enum hwmem_access access;
+ enum hwmem_access required_access;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region region;
+
+ resolved_buf->hwmem_alloc =
+ hwmem_resolve_by_name(img->buf.hwmem_buf_name);
+ if (IS_ERR(resolved_buf->hwmem_alloc)) {
+ return_value = PTR_ERR(resolved_buf->hwmem_alloc);
+ b2r2_log_info(cont->dev, "%s: hwmem_resolve_by_name failed, "
+ "error code: %i\n", __func__, return_value);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(resolved_buf->hwmem_alloc, &resolved_buf->file_len,
+ &mem_type, &access);
+
+ required_access = (is_dst ? HWMEM_ACCESS_WRITE : HWMEM_ACCESS_READ) |
+ HWMEM_ACCESS_IMPORT;
+ if ((required_access & access) != required_access) {
+ b2r2_log_info(cont->dev, "%s: Insufficient access to hwmem "
+ "buffer.\n", __func__);
+ return_value = -EACCES;
+ goto access_check_failed;
+ }
+
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) {
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer is scattered.\n",
+ __func__);
+ return_value = -EINVAL;
+ goto buf_scattered;
+ }
+
+ if (resolved_buf->file_len <
+ img->buf.offset + (__u32)b2r2_get_img_size(cont, img)) {
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer too small. (%d < "
+ "%d)\n", __func__, resolved_buf->file_len,
+ img->buf.offset +
+ (__u32)b2r2_get_img_size(cont, img));
+ return_value = -EINVAL;
+ goto size_check_failed;
+ }
+
+ return_value = hwmem_pin(resolved_buf->hwmem_alloc, &mem_chunk,
+ &mem_chunk_length);
+ if (return_value < 0) {
+ b2r2_log_info(cont->dev, "%s: hwmem_pin failed, "
+ "error code: %i\n", __func__, return_value);
+ goto pin_failed;
+ }
+ resolved_buf->file_physical_start = mem_chunk.paddr;
+
+ set_up_hwmem_region(cont, img, rect_2b_used, &region);
+ return_value = hwmem_set_domain(resolved_buf->hwmem_alloc,
+ required_access, HWMEM_DOMAIN_SYNC, &region);
+ if (return_value < 0) {
+ b2r2_log_info(cont->dev, "%s: hwmem_set_domain failed, "
+ "error code: %i\n", __func__, return_value);
+ goto set_domain_failed;
+ }
+
+ resolved_buf->physical_address =
+ resolved_buf->file_physical_start + img->buf.offset;
+
+ goto out;
+
+set_domain_failed:
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+pin_failed:
+size_check_failed:
+buf_scattered:
+access_check_failed:
+ hwmem_release(resolved_buf->hwmem_alloc);
+resolve_failed:
+
+out:
+ return return_value;
+}
+
+static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf)
+{
+ hwmem_unpin(resolved_buf->hwmem_alloc);
+ hwmem_release(resolved_buf->hwmem_alloc);
+}
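+
+/*
+ * Pairing note: resolve_hwmem() and unresolve_hwmem() mirror each
+ * other. Setup is hwmem_resolve_by_name() -> hwmem_pin() ->
+ * hwmem_set_domain(); teardown is hwmem_unpin() -> hwmem_release(),
+ * i.e. in reverse order of setup.
+ */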
+
+/**
+ * unresolve_buf() - Releases a buffer previously set up by
+ * resolve_buf(); must be called after a successful resolve_buf()
+ *
+ * @buf: The buffer specification as supplied from user space
+ * @resolved: Gathered information about the buffer
+ */
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf,
+ struct b2r2_resolved_buf *resolved)
+{
+#ifdef CONFIG_ANDROID_PMEM
+ if (resolved->is_pmem && resolved->filep)
+ put_pmem_file(resolved->filep);
+#endif
+ if (resolved->hwmem_alloc != NULL)
+ unresolve_hwmem(resolved);
+}
+
+/**
+ * get_fb_info() - Fill buf with framebuffer info
+ *
+ * @file: The framebuffer file
+ * @buf: Gathered information about the buffer
+ * @img_offset: Image offset into the frame buffer
+ *
+ * Returns 0 if OK else negative error code
+ */
+static int get_fb_info(struct file *file,
+ struct b2r2_resolved_buf *buf,
+ __u32 img_offset)
+{
+#ifdef CONFIG_FB
+ if (file && buf &&
+ MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+ int i;
+ /*
+		 * TODO: Is it safe to iterate registered_fb like this
+		 * without locking?
+ */
+ for (i = 0; i < num_registered_fb; i++) {
+ struct fb_info *info = registered_fb[i];
+
+ if (info && info->dev &&
+ MINOR(info->dev->devt) ==
+ MINOR(file->f_dentry->d_inode->i_rdev)) {
+ buf->file_physical_start = info->fix.smem_start;
+ buf->file_virtual_start = (u32)info->screen_base;
+ buf->file_len = info->fix.smem_len;
+ buf->physical_address = buf->file_physical_start +
+ img_offset;
+ buf->virtual_address =
+ (void *) (buf->file_virtual_start +
+ img_offset);
+ return 0;
+ }
+ }
+ }
+#endif
+ return -EINVAL;
+}
+
+/**
+ * resolve_buf() - Returns the physical & virtual addresses of a B2R2 blt buffer
+ *
+ * @img: The image specification as supplied from user space
+ * @rect_2b_used: The part of the image b2r2 will use.
+ * @is_dst: true if the buffer will be used as a destination, false if
+ * it will be used as a source.
+ * @resolved: Gathered information about the buffer
+ *
+ * Returns 0 if OK else negative error code
+ */
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst,
+ struct b2r2_resolved_buf *resolved)
+{
+ int ret = 0;
+
+ memset(resolved, 0, sizeof(*resolved));
+
+ switch (img->buf.type) {
+ case B2R2_BLT_PTR_NONE:
+ break;
+
+ case B2R2_BLT_PTR_PHYSICAL:
+ resolved->physical_address = img->buf.offset;
+ resolved->file_len = img->buf.len;
+ break;
+
+ /* FD + OFFSET type */
+ case B2R2_BLT_PTR_FD_OFFSET: {
+ /*
+ * TODO: Do we need to check if the process is allowed to
+ * read/write (depending on if it's dst or src) to the file?
+ */
+#ifdef CONFIG_ANDROID_PMEM
+ if (!get_pmem_file(
+ img->buf.fd,
+ (unsigned long *) &resolved->file_physical_start,
+ (unsigned long *) &resolved->file_virtual_start,
+ (unsigned long *) &resolved->file_len,
+ &resolved->filep)) {
+ resolved->physical_address =
+ resolved->file_physical_start +
+ img->buf.offset;
+ resolved->virtual_address = (void *)
+ (resolved->file_virtual_start +
+ img->buf.offset);
+ resolved->is_pmem = true;
+ } else
+#endif
+ {
+ int fput_needed;
+ struct file *file;
+
+ file = fget_light(img->buf.fd, &fput_needed);
+ if (file == NULL)
+ return -EINVAL;
+
+ ret = get_fb_info(file, resolved,
+ img->buf.offset);
+ fput_light(file, fput_needed);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Check bounds */
+ if (img->buf.offset + img->buf.len >
+ resolved->file_len) {
+ ret = -ESPIPE;
+ unresolve_buf(cont, &img->buf, resolved);
+ }
+
+ break;
+ }
+
+ case B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET:
+ ret = resolve_hwmem(cont, img, rect_2b_used, is_dst, resolved);
+ break;
+
+ default:
+ b2r2_log_warn(cont->dev, "%s: Failed to resolve buf type %d\n",
+ __func__, img->buf.type);
+
+ ret = -EINVAL;
+ break;
+
+ }
+
+ return ret;
+}
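+
+/*
+ * Example (hypothetical values) of the simplest case, a physically
+ * contiguous buffer:
+ *
+ *	img->buf.type   = B2R2_BLT_PTR_PHYSICAL;
+ *	img->buf.offset = 0x12340000;	physical address
+ *	img->buf.len    = 0x10000;
+ *
+ * resolve_buf() then just copies offset/len into
+ * resolved->physical_address and resolved->file_len; no pinning or
+ * mapping is needed.
+ */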
+
+/**
+ * sync_buf() - Synchronizes the memory occupied by an image buffer.
+ *
+ * @img: User image specification
+ * @resolved: Gathered info (physical address etc.) about the buffer
+ * @is_dst: true if the buffer is a destination buffer, false if the
+ *          buffer is a source buffer.
+ * @rect: Rectangle in the image buffer that should be synced.
+ *        NULL if the buffer is a source mask.
+ */
+static void sync_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved,
+ bool is_dst,
+ struct b2r2_blt_rect *rect)
+{
+ struct sync_args sa;
+ u32 start_phys, end_phys;
+
+ if (B2R2_BLT_PTR_NONE == img->buf.type ||
+ B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type)
+ return;
+
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+
+ /*
+ * TODO: Very ugly. We should find out whether the memory is coherent in
+ * some generic way but cache handling will be rewritten soon so there
+ * is no use spending time on it. In the new design this will probably
+ * not be a problem.
+ */
+ /* Frame buffer is coherent, at least now. */
+ if (!resolved->is_pmem) {
+ /*
+ * Drain the write buffers as they are not always part of the
+ * coherent concept.
+ */
+ wmb();
+
+ return;
+ }
+
+ /*
+	 * A src_mask has no rect.
+	 * Also flush the full buffer for planar and semi-planar YUV formats.
+ */
+ if (rect == NULL ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR) ||
+ (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE) ||
+ (img->fmt ==
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE)) {
+ sa.start = (unsigned long)resolved->virtual_address;
+ sa.end = (unsigned long)resolved->virtual_address +
+ img->buf.len;
+ start_phys = resolved->physical_address;
+ end_phys = resolved->physical_address + img->buf.len;
+ } else {
+ /*
+		 * The buffer is not a src_mask, so make use of rect when
+		 * cleaning & flushing the caches.
+ */
+ u32 bpp; /* Bits per pixel */
+ u32 pitch;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ bpp = 16;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ bpp = 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ bpp = 32;
+ break;
+ default:
+ bpp = 12;
+ }
+ if (img->pitch == 0)
+ pitch = (img->width * bpp) / 8;
+ else
+ pitch = img->pitch;
+
+ /*
+ * For 422I formats 2 horizontal pixels share color data.
+ * Thus, the x position must be aligned down to closest even
+ * number and width must be aligned up.
+ */
+ {
+ s32 x;
+ s32 width;
+
+ switch (img->fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ x = (rect->x / 2) * 2;
+ width = ((rect->width + 1) / 2) * 2;
+ break;
+ default:
+ x = rect->x;
+ width = rect->width;
+ break;
+ }
+
+ sa.start = (unsigned long)resolved->virtual_address +
+ rect->y * pitch + (x * bpp) / 8;
+ sa.end = (unsigned long)sa.start +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+
+ start_phys = resolved->physical_address +
+ rect->y * pitch + (x * bpp) / 8;
+ end_phys = start_phys +
+ (rect->height - 1) * pitch +
+ (width * bpp) / 8;
+ }
+ }
+
+ /*
+ * The virtual address to a pmem buffer is retrieved from ioremap, not
+ * sure if it's ok to use such an address as a kernel virtual address.
+ * When doing it at a higher level such as dma_map_single it triggers an
+ * error but at lower levels such as dmac_clean_range it seems to work,
+ * hence the low level stuff.
+ */
+
+ if (is_dst) {
+ /*
+ * According to ARM's docs you must clean before invalidating
+		 * (i.e. flush) to avoid losing data.
+ */
+
+ /* Flush L1 cache */
+#ifdef CONFIG_SMP
+ flush_l1_cache_range_all_cpus(&sa);
+#else
+ flush_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Flush L2 cache */
+ outer_flush_range(start_phys, end_phys);
+ } else {
+ /* Clean L1 cache */
+#ifdef CONFIG_SMP
+ clean_l1_cache_range_all_cpus(&sa);
+#else
+ clean_l1_cache_range_curr_cpu(&sa);
+#endif
+
+ /* Clean L2 cache */
+ outer_clean_range(start_phys, end_phys);
+ }
+}
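+
+/*
+ * Worked example (hypothetical numbers) of the 422 interleaved
+ * alignment in sync_buf() above: rect = {x=3, width=5} becomes
+ * x = (3 / 2) * 2 = 2 and width = ((5 + 1) / 2) * 2 = 6, growing the
+ * synced span from pixels [3, 8) to [2, 8) so that both edges land on
+ * pixel pairs that share chroma data.
+ */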
+
+/**
+ * is_report_list_empty() - Mutex protected check of the report list
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_report_list_empty(struct b2r2_blt_instance *instance)
+{
+ bool is_empty;
+
+ mutex_lock(&instance->lock);
+ is_empty = list_empty(&instance->report_list);
+ mutex_unlock(&instance->lock);
+
+ return is_empty;
+}
+
+/**
+ * is_synching() - Mutex protected check of the synching state
+ *
+ * @instance: The B2R2 BLT instance
+ */
+static bool is_synching(struct b2r2_blt_instance *instance)
+{
+ bool is_synching;
+
+ mutex_lock(&instance->lock);
+ is_synching = instance->synching;
+ mutex_unlock(&instance->lock);
+
+ return is_synching;
+}
+
+/**
+ * inc_stat() - Mutex protected increment of a statistics variable
+ *
+ * @stat: Pointer to statistics variable that should be incremented
+ */
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat)
+{
+ mutex_lock(&cont->stat_lock);
+ (*stat)++;
+ mutex_unlock(&cont->stat_lock);
+}
+
+/**
+ * dec_stat() - Mutex protected decrement of a statistics variable
+ *
+ * @stat: Pointer to statistics variable that should be decremented
+ */
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat)
+{
+ mutex_lock(&cont->stat_lock);
+ (*stat)--;
+ mutex_unlock(&cont->stat_lock);
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sprintf_req() - Builds a string representing the request, for debug
+ *
+ * @request:Request that should be encoded into a string
+ * @buf: Receiving buffer
+ * @size: Size of receiving buffer
+ *
+ * Returns number of characters in string, excluding null terminator
+ */
+static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
+{
+ size_t dev_size = 0;
+
+ /* generic request info */
+ dev_size += sprintf(buf + dev_size,
+ "instance : 0x%08lX\n",
+ (unsigned long) request->instance);
+ dev_size += sprintf(buf + dev_size,
+ "size : %d bytes\n", request->user_req.size);
+ dev_size += sprintf(buf + dev_size,
+ "flags : 0x%08lX\n",
+ (unsigned long) request->user_req.flags);
+ dev_size += sprintf(buf + dev_size,
+ "transform : %d\n",
+ (int) request->user_req.transform);
+ dev_size += sprintf(buf + dev_size,
+ "prio : %d\n", request->user_req.transform);
+ dev_size += sprintf(buf + dev_size,
+ "global_alpha : %d\n",
+ (int) request->user_req.global_alpha);
+ dev_size += sprintf(buf + dev_size,
+ "report1 : 0x%08lX\n",
+ (unsigned long) request->user_req.report1);
+ dev_size += sprintf(buf + dev_size,
+ "report2 : 0x%08lX\n",
+ (unsigned long) request->user_req.report2);
+ dev_size += sprintf(buf + dev_size,
+ "request_id : 0x%08lX\n\n",
+ (unsigned long) request->request_id);
+
+ /* src info */
+ dev_size += sprintf(buf + dev_size,
+ "src_img.fmt : %#010x\n",
+ request->user_req.src_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d, "
+ "offset=%d, len=%d}\n",
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.hwmem_buf_name,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.fmt : %#010x\n",
+ request->user_req.src_mask.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.src_mask.buf.type,
+ request->user_req.src_mask.buf.hwmem_buf_name,
+ request->user_req.src_mask.buf.fd,
+ request->user_req.src_mask.buf.offset,
+ request->user_req.src_mask.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_mask.width,
+ request->user_req.src_mask.height,
+ request->user_req.src_mask.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "src_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "src_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.src_color);
+
+ /* bg info */
+ dev_size += sprintf(buf + dev_size,
+ "bg_img.fmt : %#010x\n",
+ request->user_req.bg_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "bg_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.hwmem_buf_name,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "bg_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "bg_rect : {x=%d, y=%d, width=%d, height=%d}\n\n",
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ /* dst info */
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.fmt : %#010x\n",
+ request->user_req.dst_img.fmt);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.hwmem_buf_name,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.buf.len);
+ dev_size += sprintf(buf + dev_size,
+ "dst_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_img.pitch);
+ dev_size += sprintf(buf + dev_size,
+ "dst_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_clip_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
+ dev_size += sprintf(buf + dev_size,
+ "dst_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.dst_color);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_resolved.file_len : %d\n\n",
+ request->src_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "src_mask_resolved.file_len : %d\n\n",
+ request->src_mask_resolved.file_len);
+
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ physical_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.virtual_address);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.filep);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ file_physical_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.file_virtual_start);
+ dev_size += sprintf(buf + dev_size,
+ "dst_resolved.file_len : %d\n\n",
+ request->dst_resolved.file_len);
+
+ return dev_size;
+}
+
+/**
+ * debugfs_b2r2_blt_request_read() - Implements debugfs read for the
+ * latest B2R2 BLT request
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_request_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+	char *Buf = kmalloc(4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+	dev_size = sprintf_req(&cont->debugfs_latest_request, Buf, 4096);
+
+	/* No more to read if the offset is beyond the end */
+	if (*f_pos > dev_size)
+		goto out;
+
+	if (*f_pos + count > dev_size)
+		count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+	kfree(Buf);
+ return ret;
+}
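+
+/*
+ * Note: the *f_pos/count clamping above matches what the kernel helper
+ * simple_read_from_buffer() in <linux/fs.h> does; an equivalent
+ * (untested) sketch of the tail of the function:
+ *
+ *	dev_size = sprintf_req(&cont->debugfs_latest_request, Buf, 4096);
+ *	ret = simple_read_from_buffer(buf, count, f_pos, Buf, dev_size);
+ */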
+
+/**
+ * debugfs_b2r2_blt_request_fops - File operations for B2R2 request debugfs
+ */
+static const struct file_operations debugfs_b2r2_blt_request_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_request_read,
+};
+
+/**
+ * struct debugfs_reg - Represents a B2R2 node "register"
+ *
+ * @name: Register name
+ * @offset: Offset within the node
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_node_regs - Array with all the registers in a B2R2 node, for debug
+ */
+static const struct debugfs_reg debugfs_node_regs[] = {
+ {"GROUP0.B2R2_NIP", offsetof(struct b2r2_link_list, GROUP0.B2R2_NIP)},
+ {"GROUP0.B2R2_CIC", offsetof(struct b2r2_link_list, GROUP0.B2R2_CIC)},
+ {"GROUP0.B2R2_INS", offsetof(struct b2r2_link_list, GROUP0.B2R2_INS)},
+ {"GROUP0.B2R2_ACK", offsetof(struct b2r2_link_list, GROUP0.B2R2_ACK)},
+
+ {"GROUP1.B2R2_TBA", offsetof(struct b2r2_link_list, GROUP1.B2R2_TBA)},
+ {"GROUP1.B2R2_TTY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TTY)},
+ {"GROUP1.B2R2_TXY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TXY)},
+ {"GROUP1.B2R2_TSZ", offsetof(struct b2r2_link_list, GROUP1.B2R2_TSZ)},
+
+ {"GROUP2.B2R2_S1CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S1CF)},
+ {"GROUP2.B2R2_S2CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S2CF)},
+
+ {"GROUP3.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP3.B2R2_SBA)},
+ {"GROUP3.B2R2_STY", offsetof(struct b2r2_link_list, GROUP3.B2R2_STY)},
+ {"GROUP3.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP3.B2R2_SXY)},
+ {"GROUP3.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP3.B2R2_SSZ)},
+
+ {"GROUP4.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP4.B2R2_SBA)},
+ {"GROUP4.B2R2_STY", offsetof(struct b2r2_link_list, GROUP4.B2R2_STY)},
+ {"GROUP4.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP4.B2R2_SXY)},
+ {"GROUP4.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP4.B2R2_SSZ)},
+
+ {"GROUP5.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP5.B2R2_SBA)},
+ {"GROUP5.B2R2_STY", offsetof(struct b2r2_link_list, GROUP5.B2R2_STY)},
+ {"GROUP5.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP5.B2R2_SXY)},
+ {"GROUP5.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP5.B2R2_SSZ)},
+
+ {"GROUP6.B2R2_CWO", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWO)},
+ {"GROUP6.B2R2_CWS", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWS)},
+
+ {"GROUP7.B2R2_CCO", offsetof(struct b2r2_link_list, GROUP7.B2R2_CCO)},
+ {"GROUP7.B2R2_CML", offsetof(struct b2r2_link_list, GROUP7.B2R2_CML)},
+
+ {"GROUP8.B2R2_FCTL", offsetof(struct b2r2_link_list, GROUP8.B2R2_FCTL)},
+ {"GROUP8.B2R2_PMK", offsetof(struct b2r2_link_list, GROUP8.B2R2_PMK)},
+
+ {"GROUP9.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP9.B2R2_RSF)},
+ {"GROUP9.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP9.B2R2_RZI)},
+ {"GROUP9.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_HFP)},
+ {"GROUP9.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_VFP)},
+
+ {"GROUP10.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP10.B2R2_RSF)},
+ {"GROUP10.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP10.B2R2_RZI)},
+ {"GROUP10.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_HFP)},
+ {"GROUP10.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_VFP)},
+
+ {"GROUP11.B2R2_FF0", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF0)},
+ {"GROUP11.B2R2_FF1", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF1)},
+ {"GROUP11.B2R2_FF2", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF2)},
+ {"GROUP11.B2R2_FF3", offsetof(struct b2r2_link_list,
+ GROUP11.B2R2_FF3)},
+
+ {"GROUP12.B2R2_KEY1", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY1)},
+ {"GROUP12.B2R2_KEY2", offsetof(struct b2r2_link_list,
+ GROUP12.B2R2_KEY2)},
+
+ {"GROUP13.B2R2_XYL", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYL)},
+ {"GROUP13.B2R2_XYP", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYP)},
+
+ {"GROUP14.B2R2_SAR", offsetof(struct b2r2_link_list, GROUP14.B2R2_SAR)},
+ {"GROUP14.B2R2_USR", offsetof(struct b2r2_link_list, GROUP14.B2R2_USR)},
+
+ {"GROUP15.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX0)},
+ {"GROUP15.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX1)},
+ {"GROUP15.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX2)},
+ {"GROUP15.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP15.B2R2_VMX3)},
+
+ {"GROUP16.B2R2_VMX0", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX0)},
+ {"GROUP16.B2R2_VMX1", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX1)},
+ {"GROUP16.B2R2_VMX2", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX2)},
+ {"GROUP16.B2R2_VMX3", offsetof(struct b2r2_link_list,
+ GROUP16.B2R2_VMX3)},
+};
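+
+/*
+ * Usage sketch (assumption, no such dump loop is shown in this file):
+ * a node dump can walk the table above and print every register of a
+ * node, given the node's kernel virtual address node_virt:
+ *
+ *	for (i = 0; i < ARRAY_SIZE(debugfs_node_regs); i++)
+ *		len += sprintf(buf + len, "%s: %08X\n",
+ *			debugfs_node_regs[i].name,
+ *			*(u32 *)(node_virt + debugfs_node_regs[i].offset));
+ */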
+
+/**
+ * debugfs_b2r2_blt_stat_read() - Implements debugfs read for B2R2 BLT
+ * statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_blt_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+	char *Buf = kmalloc(4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&cont->stat_lock);
+ dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n",
+ cont->stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Released jobs : %lu\n",
+ cont->stat_n_jobs_released);
+ dev_size += sprintf(Buf + dev_size, "Jobs in report list : %lu\n",
+ cont->stat_n_jobs_in_report_list);
+ dev_size += sprintf(Buf + dev_size, "Clients in open : %lu\n",
+ cont->stat_n_in_open);
+ dev_size += sprintf(Buf + dev_size, "Clients in release : %lu\n",
+ cont->stat_n_in_release);
+ dev_size += sprintf(Buf + dev_size, "Clients in blt : %lu\n",
+ cont->stat_n_in_blt);
+ dev_size += sprintf(Buf + dev_size, " synch : %lu\n",
+ cont->stat_n_in_blt_synch);
+ dev_size += sprintf(Buf + dev_size, " add : %lu\n",
+ cont->stat_n_in_blt_add);
+ dev_size += sprintf(Buf + dev_size, " wait : %lu\n",
+ cont->stat_n_in_blt_wait);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch 0 : %lu\n",
+ cont->stat_n_in_synch_0);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch job : %lu\n",
+ cont->stat_n_in_synch_job);
+ dev_size += sprintf(Buf + dev_size, "Clients in query_cap : %lu\n",
+ cont->stat_n_in_query_cap);
+ mutex_unlock(&cont->stat_lock);
+
+	/* No more to read if the offset is beyond the end */
+	if (*f_pos > dev_size)
+		goto out;
+
+	if (*f_pos + count > dev_size)
+		count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+	kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_blt_stat_fops - File operations for B2R2 BLT
+ * statistics debugfs
+ */
+static const struct file_operations debugfs_b2r2_blt_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_blt_stat_read,
+};
+#endif
+
+static void init_tmp_bufs(struct b2r2_control *cont)
+{
+ int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(cont->tmp_bufs); i++) {
+ cont->tmp_bufs[i].buf.virt_addr = dma_alloc_coherent(
+ cont->dev, MAX_TMP_BUF_SIZE,
+ &cont->tmp_bufs[i].buf.phys_addr, GFP_DMA);
+ if (cont->tmp_bufs[i].buf.virt_addr != NULL)
+ cont->tmp_bufs[i].buf.size = MAX_TMP_BUF_SIZE;
+ else {
+ b2r2_log_err(cont->dev, "%s: Failed to allocate temp "
+ "buffer %i\n", __func__, i);
+ cont->tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+static void destroy_tmp_bufs(struct b2r2_control *cont)
+{
+ int i = 0;
+
+ for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) {
+ if (cont->tmp_bufs[i].buf.size != 0) {
+ dma_free_coherent(cont->dev,
+ cont->tmp_bufs[i].buf.size,
+ cont->tmp_bufs[i].buf.virt_addr,
+ cont->tmp_bufs[i].buf.phys_addr);
+
+ cont->tmp_bufs[i].buf.size = 0;
+ }
+ }
+}
+
+/**
+ * b2r2_blt_module_init() - Module init function
+ *
+ * Returns 0 if OK else negative error code
+ */
+int b2r2_blt_module_init(struct b2r2_control *cont)
+{
+ int ret;
+
+ mutex_init(&cont->stat_lock);
+
+ /* Register b2r2 driver */
+ cont->miscdev.minor = MISC_DYNAMIC_MINOR;
+ cont->miscdev.name = cont->name;
+ cont->miscdev.fops = &b2r2_blt_fops;
+
+ ret = misc_register(&cont->miscdev);
+ if (ret) {
+ printk(KERN_WARNING "%s: registering misc device fails\n",
+ __func__);
+ goto b2r2_misc_register_fail;
+ }
+
+ cont->dev = cont->miscdev.this_device;
+ dev_set_drvdata(cont->dev, cont);
+
+#ifdef CONFIG_B2R2_GENERIC
+ /* Initialize generic path */
+ b2r2_generic_init(cont);
+#endif
+ /* Initialize node splitter */
+ ret = b2r2_node_split_init(cont);
+ if (ret) {
+ printk(KERN_WARNING "%s: node split init fails\n", __func__);
+ goto b2r2_node_split_init_fail;
+ }
+
+ b2r2_log_info(cont->dev, "%s: device registered\n", __func__);
+
+ /*
+ * FIXME: This stuff should be done before the first requests i.e.
+ * before misc_register, but they need the device which is not
+ * available until after misc_register.
+ */
+ cont->dev->coherent_dma_mask = 0xFFFFFFFF;
+ init_tmp_bufs(cont);
+ ret = b2r2_filters_init(cont);
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: failed to init filters\n",
+ __func__);
+ goto b2r2_filter_init_fail;
+ }
+
+ /* Initialize memory allocator */
+ ret = b2r2_mem_init(cont, B2R2_HEAP_SIZE,
+ 4, sizeof(struct b2r2_node));
+ if (ret) {
+ printk(KERN_WARNING "%s: initializing B2R2 memhandler fails\n",
+ __func__);
+ goto b2r2_mem_init_fail;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs */
+ if (cont->debugfs_root_dir) {
+ debugfs_create_file("last_request", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_request_fops);
+ debugfs_create_file("stats", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_stat_fops);
+ }
+#endif
+
+ b2r2_ctl[cont->id] = cont;
+ b2r2_log_info(cont->dev, "%s: done\n", __func__);
+
+ return ret;
+
+b2r2_mem_init_fail:
+ b2r2_filters_exit(cont);
+b2r2_filter_init_fail:
+ b2r2_node_split_exit(cont);
+b2r2_node_split_init_fail:
+#ifdef CONFIG_B2R2_GENERIC
+ b2r2_generic_exit(cont);
+#endif
+ misc_deregister(&cont->miscdev);
+b2r2_misc_register_fail:
+ return ret;
+}
+
+/**
+ * b2r2_blt_module_exit() - Module exit function
+ */
+void b2r2_blt_module_exit(struct b2r2_control *cont)
+{
+ if (cont) {
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+#ifdef CONFIG_DEBUG_FS
+ if (cont->debugfs_root_dir) {
+ debugfs_remove_recursive(cont->debugfs_root_dir);
+ cont->debugfs_root_dir = NULL;
+ }
+#endif
+ b2r2_mem_exit(cont);
+ destroy_tmp_bufs(cont);
+ b2r2_ctl[cont->id] = NULL;
+ misc_deregister(&cont->miscdev);
+ b2r2_node_split_exit(cont);
+#if defined(CONFIG_B2R2_GENERIC)
+ b2r2_generic_exit(cont);
+#endif
+ b2r2_filters_exit(cont);
+ }
+}
+
+MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>");
+MODULE_DESCRIPTION("ST-Ericsson B2R2 Blitter module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_core.c b/drivers/video/b2r2/b2r2_core.c
new file mode 100644
index 00000000000..629633a7888
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.c
@@ -0,0 +1,2819 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * TODO: Clock address from platform data
+ * Platform data should have string id instead of numbers
+ * b2r2_remove, some type of runtime problem when kernel hacking
+ * debug features on
+ *
+ * Is there already a priority list in kernel?
+ * Is it possible to handle clock using clock framework?
+ * uTimeOut, use mdelay instead?
+ * Measure performance
+ *
+ * Exchange our home-cooked ref count with kernel kref? See
+ * http://lwn.net/Articles/336224/
+ *
+ * B2R2:
+ * Source fill 2 bug
+ * Check with Symbian?
+ */
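+
+/*
+ * Sketch of the kref replacement mentioned in the TODO above. This is
+ * an assumption, not current driver code, and presumes a struct kref
+ * field named 'kref' added to struct b2r2_core_job:
+ *
+ *	#include <linux/kref.h>
+ *
+ *	static void job_kref_release(struct kref *kref)
+ *	{
+ *		struct b2r2_core_job *job =
+ *			container_of(kref, struct b2r2_core_job, kref);
+ *		job->release(job);
+ *	}
+ *
+ *	kref_get(&job->kref);				addref
+ *	kref_put(&job->kref, job_kref_release);		release
+ *
+ * kref_put() invokes the release callback when the count reaches zero,
+ * which would replace the hand-rolled ref_count and sentinel checks
+ * used below.
+ */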
+
+/* include file */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_core.h"
+#include "b2r2_global.h"
+#include "b2r2_structures.h"
+#include "b2r2_internal.h"
+#include "b2r2_profiler_api.h"
+#include "b2r2_timing.h"
+#include "b2r2_debug.h"
+
+/**
+ * B2R2_DRIVER_TIMEOUT_VALUE - Busy loop timeout after soft reset
+ */
+#define B2R2_DRIVER_TIMEOUT_VALUE (1500)
+
+/**
+ * B2R2_CLK_FLAG - Value to write into clock reg to turn clock on
+ */
+#define B2R2_CLK_FLAG (0x125)
+
+/**
+ * DEBUG_CHECK_ADDREF_RELEASE - Define this to enable addref / release debug
+ */
+#define DEBUG_CHECK_ADDREF_RELEASE 1
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * HANDLE_TIMEOUTED_JOBS - Define this to check jobs for timeout and cancel them
+ */
+#define HANDLE_TIMEOUTED_JOBS 1
+#endif
+
+/**
+ * B2R2_CLOCK_ALWAYS_ON - Define this to disable power save clock turn off
+ */
+/* #define B2R2_CLOCK_ALWAYS_ON 1 */
+
+/**
+ * START_SENTINEL - Watch guard to detect job overwrites
+ */
+#define START_SENTINEL 0xBABEDEEA
+
+/**
+ * END_SENTINEL - Watch guard to detect job overwrites
+ */
+#define END_SENTINEL 0xDADBDCDD
+
+/**
+ * B2R2_CORE_LOWEST_PRIO - Lowest prio allowed
+ */
+#define B2R2_CORE_LOWEST_PRIO -19
+/**
+ * B2R2_CORE_HIGHEST_PRIO - Highest prio allowed
+ */
+#define B2R2_CORE_HIGHEST_PRIO 20
+
+/**
+ * B2R2_DOMAIN_DISABLE_TIMEOUT - Delay before the B2R2 power domain is
+ * disabled
+ */
+#define B2R2_DOMAIN_DISABLE_TIMEOUT (HZ/100)
+
+/**
+ * B2R2_REGULATOR_RETRY_COUNT - Maximum number of retries when turning
+ * the B2R2 regulator on or off
+ */
+#define B2R2_REGULATOR_RETRY_COUNT 10
+
+/**
+ * B2R2 Hardware defines below
+ */
+
+/* - BLT_AQ_CTL */
+#define B2R2_AQ_Enab (0x80000000)
+#define B2R2_AQ_PRIOR_0 (0x0)
+#define B2R2_AQ_PRIOR_1 (0x1)
+#define B2R2_AQ_PRIOR_2 (0x2)
+#define B2R2_AQ_PRIOR_3 (0x3)
+#define B2R2_AQ_NODE_REPEAT_INT (0x100000)
+#define B2R2_AQ_STOP_INT (0x200000)
+#define B2R2_AQ_LNA_REACH_INT (0x400000)
+#define B2R2_AQ_COMPLETED_INT (0x800000)
+
+/* - BLT_CTL */
+#define B2R2BLT_CTLGLOBAL_soft_reset (0x80000000)
+#define B2R2BLT_CTLStep_By_Step (0x20000000)
+#define B2R2BLT_CTLBig_not_little (0x10000000)
+#define B2R2BLT_CTLMask (0xb0000000)
+#define B2R2BLT_CTLTestMask (0xb0000000)
+#define B2R2BLT_CTLInitialValue (0x0)
+#define B2R2BLT_CTLAccessType (INITIAL_TEST)
+#define B2R2BLT_CTL (0xa00)
+
+/* - BLT_ITS */
+#define B2R2BLT_ITSRLD_ERROR (0x80000000)
+#define B2R2BLT_ITSAQ4_Node_Notif (0x8000000)
+#define B2R2BLT_ITSAQ4_Node_repeat (0x4000000)
+#define B2R2BLT_ITSAQ4_Stopped (0x2000000)
+#define B2R2BLT_ITSAQ4_LNA_Reached (0x1000000)
+#define B2R2BLT_ITSAQ3_Node_Notif (0x800000)
+#define B2R2BLT_ITSAQ3_Node_repeat (0x400000)
+#define B2R2BLT_ITSAQ3_Stopped (0x200000)
+#define B2R2BLT_ITSAQ3_LNA_Reached (0x100000)
+#define B2R2BLT_ITSAQ2_Node_Notif (0x80000)
+#define B2R2BLT_ITSAQ2_Node_repeat (0x40000)
+#define B2R2BLT_ITSAQ2_Stopped (0x20000)
+#define B2R2BLT_ITSAQ2_LNA_Reached (0x10000)
+#define B2R2BLT_ITSAQ1_Node_Notif (0x8000)
+#define B2R2BLT_ITSAQ1_Node_repeat (0x4000)
+#define B2R2BLT_ITSAQ1_Stopped (0x2000)
+#define B2R2BLT_ITSAQ1_LNA_Reached (0x1000)
+#define B2R2BLT_ITSCQ2_Repaced (0x80)
+#define B2R2BLT_ITSCQ2_Node_Notif (0x40)
+#define B2R2BLT_ITSCQ2_retriggered (0x20)
+#define B2R2BLT_ITSCQ2_completed (0x10)
+#define B2R2BLT_ITSCQ1_Repaced (0x8)
+#define B2R2BLT_ITSCQ1_Node_Notif (0x4)
+#define B2R2BLT_ITSCQ1_retriggered (0x2)
+#define B2R2BLT_ITSCQ1_completed (0x1)
+#define B2R2BLT_ITSMask (0x8ffff0ff)
+#define B2R2BLT_ITSTestMask (0x8ffff0ff)
+#define B2R2BLT_ITSInitialValue (0x0)
+#define B2R2BLT_ITSAccessType (INITIAL_TEST)
+#define B2R2BLT_ITS (0xa04)
+
+/* - BLT_STA1 */
+#define B2R2BLT_STA1BDISP_IDLE (0x1)
+#define B2R2BLT_STA1Mask (0x1)
+#define B2R2BLT_STA1TestMask (0x1)
+#define B2R2BLT_STA1InitialValue (0x1)
+#define B2R2BLT_STA1AccessType (INITIAL_TEST)
+#define B2R2BLT_STA1 (0xa08)
+
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+
+/**
+ * struct addref_release - Represents one addref or release. Used
+ * to debug addref / release problems
+ *
+ * @addref: true if this represents an addref else it represents
+ * a release.
+ * @job: The job that was referenced
+ * @caller: The caller of the addref or release
+ * @ref_count: The job reference count after addref / release
+ */
+struct addref_release {
+ bool addref;
+ struct b2r2_core_job *job;
+ const char *caller;
+ int ref_count;
+};
+
+#endif
+
+/**
+ * struct b2r2_core - Administration data for B2R2 core
+ *
+ * @lock: Spin lock protecting the b2r2_core structure and the B2R2 HW
+ * @hw: B2R2 registers memory mapped
+ * @pmu_b2r2_clock: Control of B2R2 clock
+ * @log_dev: Device used for logging via dev_... functions
+ *
+ * @prio_queue: Queue of jobs sorted in priority order
+ * @active_jobs: Array containing pointer to zero or one job per queue
+ * @n_active_jobs: Number of active jobs
+ * @jiffies_last_active: jiffies value when the last active job was added
+ * @jiffies_last_irq: jiffies value when the last irq occurred
+ * @timeout_work: Work structure for timeout work
+ *
+ * @next_job_id: Contains the job id that will be assigned to the next
+ * added job.
+ *
+ * @clock_request_count: When non-zero, clock is on
+ * @clock_off_timer: Kernel timer to handle delayed turn off of clock
+ *
+ * @work_queue: Work queue to handle done jobs (callbacks) and timeouts in
+ * non-interrupt context.
+ *
+ * @stat_n_irq: Number of interrupts (statistics)
+ * @stat_n_jobs_added: Number of jobs added (statistics)
+ * @stat_n_jobs_removed: Number of jobs removed (statistics)
+ * @stat_n_jobs_in_prio_list: Number of jobs in prio list (statistics)
+ *
+ * @debugfs_root_dir: Root directory for B2R2 debugfs
+ *
+ * @ar: Circular array of addref / release debug structs
+ * @ar_write: Where next write will occur
+ * @ar_read: First valid place to read. When ar_read == ar_write then
+ * the array is empty.
+ */
+struct b2r2_core {
+ spinlock_t lock;
+
+ struct b2r2_memory_map *hw;
+
+ u8 op_size;
+ u8 ch_size;
+ u8 pg_size;
+ u8 mg_size;
+ u16 min_req_time;
+ int irq;
+
+ char name[16];
+ struct device *dev;
+
+ struct list_head prio_queue;
+
+ struct b2r2_core_job *active_jobs[B2R2_CORE_QUEUE_NO_OF];
+ unsigned long n_active_jobs;
+
+ unsigned long jiffies_last_active;
+ unsigned long jiffies_last_irq;
+#ifdef HANDLE_TIMEOUTED_JOBS
+ struct delayed_work timeout_work;
+#endif
+ int next_job_id;
+
+ unsigned long clock_request_count;
+ struct timer_list clock_off_timer;
+
+ struct workqueue_struct *work_queue;
+
+ /* Statistics */
+ unsigned long stat_n_irq;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_removed;
+
+ unsigned long stat_n_jobs_in_prio_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_core_root_dir;
+ struct dentry *debugfs_regs_dir;
+#endif
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Tracking release bug...*/
+ struct addref_release ar[100];
+ int ar_write;
+ int ar_read;
+#endif
+
+ /* Power management variables */
+ struct mutex domain_lock;
+ struct delayed_work domain_disable_work;
+
+ /*
+ * We need to keep track of both the number of domain_enable/disable()
+ * calls and whether the power was actually turned off, since the
+ * power off is done in a delayed job.
+ */
+ bool domain_enabled;
+ int domain_request_count;
+
+ struct clk *b2r2_clock;
+ struct regulator *b2r2_reg;
+
+ struct b2r2_control *control;
+};
+
+/**
+ * b2r2_core - Quick link to administration data for B2R2
+ */
+static struct b2r2_core *b2r2_core[B2R2_MAX_NBR_DEVICES];
+
+/* Local functions */
+static void check_prio_list(struct b2r2_core *core, bool atomic);
+static void clear_interrupts(struct b2r2_core *core);
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job);
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_list);
+static int get_next_job_id(struct b2r2_core *core);
+static void job_work_function(struct work_struct *ptr);
+static void init_job(struct b2r2_core_job *job);
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job);
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list);
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id);
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list);
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag);
+
+static int domain_enable(struct b2r2_core *core);
+static void domain_disable(struct b2r2_core *core);
+
+static void stop_queue(enum b2r2_core_queue queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+static void printk_regs(struct b2r2_core *core);
+static int hw_reset(struct b2r2_core *core);
+static void timeout_work_function(struct work_struct *ptr);
+#endif
+
+static void reset_hw_timer(struct b2r2_core_job *job);
+static void start_hw_timer(struct b2r2_core_job *job);
+static void stop_hw_timer(struct b2r2_core *core,
+ struct b2r2_core_job *job);
+
+static int init_hw(struct b2r2_core *core);
+static void exit_hw(struct b2r2_core *core);
+
+/* Tracking release bug... */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+/**
+ * ar_add() - Adds an addref or a release to the array
+ *
+ * @core: The b2r2 core entity
+ * @job: The job that has been referenced
+ * @caller: The caller of addref / release
+ * @addref: true if it is an addref else false for release
+ */
+static void ar_add(struct b2r2_core *core, struct b2r2_core_job *job,
+ const char *caller, bool addref)
+{
+ core->ar[core->ar_write].addref = addref;
+ core->ar[core->ar_write].job = job;
+ core->ar[core->ar_write].caller = caller;
+ core->ar[core->ar_write].ref_count = job->ref_count;
+ core->ar_write = (core->ar_write + 1) %
+ ARRAY_SIZE(core->ar);
+ if (core->ar_write == core->ar_read)
+ core->ar_read = (core->ar_read + 1) %
+ ARRAY_SIZE(core->ar);
+}
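+
+/*
+ * The ar[] array is a circular log: when ar_write catches up with
+ * ar_read, the oldest entry is dropped, so at most
+ * ARRAY_SIZE(core->ar) - 1 (i.e. 99) of the most recent addref/release
+ * events are kept.
+ */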
+
+/**
+ * sprintf_ar() - Writes all addref / release to a string buffer
+ *
+ * @core: The b2r2 core entity
+ * @buf: Receiving character buffer
+ * @job: Which job to write or NULL for all
+ *
+ * NOTE! No buffer size check!!
+ */
+static char *sprintf_ar(struct b2r2_core *core, char *buf,
+ struct b2r2_core_job *job)
+{
+ int i;
+ int size = 0;
+
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
+ if (!job || job == ar->job)
+ size += sprintf(buf + size,
+ "%s on %p from %s, ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+
+ return buf;
+}
+
+/**
+ * printk_ar() - Writes all addref / release using dev_info
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to write or NULL for all
+ */
+static void printk_ar(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ int i;
+
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
+ if (!job || job == ar->job)
+ b2r2_log_info(core->dev, "%s on %p from %s,"
+ " ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
+ }
+}
+#endif
+
+/**
+ * internal_job_addref() - Increments the reference count for a job
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to increment reference count for
+ * @caller: Name of function calling addref (for debug)
+ *
+ * Note that core->lock _must_ be held
+ */
+static void internal_job_addref(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+
+ b2r2_log_info(core->dev, "%s (%p, %p) (from %s)\n",
+ __func__, core, job, caller);
+
+ /* Sanity checks */
+ BUG_ON(job == NULL);
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (%p, %p) start=%X end=%X "
+ "ref_count=%d\n", __func__, core, job,
+ job->start_sentinel, job->end_sentinel,
+ job->ref_count);
+
+ /* Something is wrong, print the addref / release array */
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(core, NULL);
+#endif
+ }
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+
+ /* Do the actual reference count increment */
+ ref_count = ++job->ref_count;
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ /* Keep track of addref / release */
+ ar_add(core, job, caller, true);
+#endif
+
+ b2r2_log_info(core->dev, "%s called from %s (%p, %p): Ref Count is "
+ "%d\n", __func__, caller, core, job, job->ref_count);
+}
+
+/**
+ * internal_job_release() - Decrements the reference count for a job
+ *
+ * @core: The b2r2 core entity
+ * @job: Which job to decrement reference count for
+ * @caller: Name of function calling release (for debug)
+ *
+ * Returns true if job_release should be called by caller
+ * (reference count reached zero).
+ *
+ * Note that core->lock _must_ be held
+ */
+static bool internal_job_release(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
+{
+ u32 ref_count;
+ bool call_release = false;
+
+ /* Sanity checks */
+ BUG_ON(job == NULL);
+
+ b2r2_log_info(core->dev, "%s (%p, %p) (from %s)\n",
+ __func__, core, job, caller);
+
+ if (job->start_sentinel != START_SENTINEL ||
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (%p, %p) start=%X end=%X "
+ "ref_count=%d\n", __func__, core, job,
+ job->start_sentinel, job->end_sentinel,
+ job->ref_count);
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ printk_ar(core, NULL);
+#endif
+ }
+
+ BUG_ON(job->start_sentinel != START_SENTINEL);
+ BUG_ON(job->end_sentinel != END_SENTINEL);
+ BUG_ON(job->ref_count == 0 || job->ref_count > 10);
+
+ /* Do the actual decrement */
+ ref_count = --job->ref_count;
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+ ar_add(core, job, caller, false);
+#endif
+ b2r2_log_info(core->dev, "%s called from %s (%p, %p) Ref Count is "
+ "%d\n", __func__, caller, core, job, ref_count);
+
+ if (!ref_count && job->release) {
+ call_release = true;
+ /* Job will now cease to exist */
+ job->start_sentinel = 0xFFFFFFFF;
+ job->end_sentinel = 0xFFFFFFFF;
+ }
+ return call_release;
+}
+
+
+/* Exported functions */
+
+/**
+ * b2r2_core_job_addref() - Increments the reference count for a job
+ *
+ * @job: Job to add a reference to
+ * @caller: Name of the calling function (for debug)
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ internal_job_addref(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
+}
+
+/**
+ * b2r2_core_job_release() - Decrements the reference count for a job
+ *
+ * @job: Job to release
+ * @caller: Name of the calling function (for debug)
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller)
+{
+ unsigned long flags;
+ bool call_release = false;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ call_release = internal_job_release(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ if (call_release)
+ job->release(job);
+}
+
+/**
+ * b2r2_core_job_add() - Adds a job for execution by B2R2
+ *
+ * @control: The b2r2 control entity
+ * @job: Job to add
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (%p, %p)\n", __func__, control, job);
+
+ /* Enable B2R2 */
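+ /* (domain_enable() may sleep, so it is called before core->lock
+    is taken below) */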
+ domain_enable(core);
+
+ spin_lock_irqsave(&core->lock, flags);
+ core->stat_n_jobs_added++;
+
+ /* Initialise internal job data */
+ init_job(job);
+
+ /* Initial reference, should be released by caller of this function */
+ job->ref_count = 1;
+
+ /* Insert job into prio list */
+ insert_into_prio_list(core, job);
+
+ /* Check if we can dispatch job */
+ check_prio_list(core, false);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return 0;
+}
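+
+/*
+ * Typical caller sequence (sketch, cf. the usage in b2r2_blt_main.c):
+ *
+ *   b2r2_core_job_add(control, job);       add job, takes initial ref
+ *   b2r2_core_job_wait(job);               block until done/cancelled
+ *   b2r2_core_job_release(job, __func__);  drop the caller's reference
+ */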
+
+/**
+ * b2r2_core_job_find() - Finds a job by job id
+ *
+ * @control: The b2r2 control entity
+ * @job_id: Job id to find
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (%p, %d)\n", __func__, control, job_id);
+
+ spin_lock_irqsave(&core->lock, flags);
+ /* Look through prio queue */
+ job = find_job_in_list(job_id, &core->prio_queue);
+
+ if (!job)
+ job = find_job_in_active_jobs(core, job_id);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job;
+}
+
+/**
+ * b2r2_core_job_find_first_with_tag() - Finds the first job with the
+ *                                       given tag
+ *
+ * @control: The b2r2 control entity
+ * @tag: Tag to find
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
+
+ b2r2_log_info(core->dev, "%s (%p, %d)\n", __func__, control, tag);
+
+ spin_lock_irqsave(&core->lock, flags);
+ /* Look through prio queue */
+ job = find_tag_in_list(core, tag, &core->prio_queue);
+
+ if (!job)
+ job = find_tag_in_active_jobs(core, tag);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job;
+}
+
+/**
+ * is_job_done() - Spin lock protected check if job is done
+ *
+ * @job: Job to check
+ *
+ * Returns true if job is done or cancelled
+ *
+ * core->lock must _NOT_ be held when calling this function
+ */
+static bool is_job_done(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ bool job_is_done;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ job_is_done =
+ job->job_state != B2R2_CORE_JOB_QUEUED &&
+ job->job_state != B2R2_CORE_JOB_RUNNING;
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return job_is_done;
+}
+
+/**
+ * b2r2_core_job_wait() - Waits until a job is done or cancelled
+ *
+ * @job: Job to wait for
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+int b2r2_core_job_wait(struct b2r2_core_job *job)
+{
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ b2r2_log_info(core->dev, "%s (%p)\n", __func__, job);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Job has never been queued */
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Wait for the job to be done */
+ ret = wait_event_interruptible(
+ job->event,
+ is_job_done(job));
+
+ if (ret)
+ b2r2_log_warn(core->dev,
+ "%s: wait_event_interruptible returns %d state is %d",
+ __func__, ret, job->job_state);
+ return ret;
+}
+
+/**
+ * cancel_job() - Cancels a job (removes it from prio list or active jobs) and
+ * calls the job callback
+ *
+ * @core: The b2r2 core entity
+ * @job: Job to cancel
+ *
+ * Returns true if the job was found and cancelled
+ *
+ * core->lock must be held when calling this function
+ */
+static bool cancel_job(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ bool found_job = false;
+ bool job_was_active = false;
+
+ /* Remove from prio list */
+ if (job->job_state == B2R2_CORE_JOB_QUEUED) {
+ list_del_init(&job->list);
+ found_job = true;
+ }
+
+ /* Remove from active jobs */
+ if (!found_job && core->n_active_jobs > 0) {
+ int i;
+
+ /* Look for the job among the active jobs */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ if (core->active_jobs[i] == job) {
+ stop_queue((enum b2r2_core_queue)i);
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ found_job = true;
+ job_was_active = true;
+ }
+ }
+ }
+
+ /* Handle done list & callback */
+ if (found_job) {
+ /* Job is canceled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ queue_work(core->work_queue, &job->work);
+
+ /* Statistics */
+ if (!job_was_active)
+ core->stat_n_jobs_in_prio_list--;
+
+ }
+
+ return found_job;
+}
+
+/* core->lock _must_ _NOT_ be held when calling this function */
+int b2r2_core_job_cancel(struct b2r2_core_job *job)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ b2r2_log_info(core->dev, "%s (%p) (%d)\n",
+ __func__, job, job->job_state);
+ /* Check that we have the job */
+ if (job->job_state == B2R2_CORE_JOB_IDLE) {
+ /* Job has never been queued */
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
+ return -ENOENT;
+ }
+
+ /* Remove from prio list */
+ spin_lock_irqsave(&core->lock, flags);
+ cancel_job(core, job);
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return ret;
+}
+
+/* LOCAL FUNCTIONS BELOW */
+
+/**
+ * domain_disable_work_function()
+ *
+ * @work: The work struct, embedded in struct b2r2_core
+ */
+static void domain_disable_work_function(struct work_struct *work)
+{
+ struct delayed_work *twork = to_delayed_work(work);
+ struct b2r2_core *core = container_of(
+ twork, struct b2r2_core, domain_disable_work);
+
+ if (!mutex_trylock(&core->domain_lock))
+ return;
+
+ if (core->domain_request_count == 0) {
+ exit_hw(core);
+ clk_disable(core->b2r2_clock);
+ regulator_disable(core->b2r2_reg);
+ core->domain_enabled = false;
+ }
+
+ mutex_unlock(&core->domain_lock);
+}
+
+/**
+ * domain_enable()
+ *
+ * @core: The b2r2 core entity
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+static int domain_enable(struct b2r2_core *core)
+{
+ mutex_lock(&core->domain_lock);
+ core->domain_request_count++;
+
+ if (!core->domain_enabled) {
+ int retry = 0;
+ int ret;
+again:
+ /*
+  * regulator_enable() may sleep and can fail transiently with
+  * -EAGAIN; retry a bounded number of times before giving up.
+  */
+ ret = regulator_enable(core->b2r2_reg);
+ if ((ret == -EAGAIN) &&
+ ((retry++) < B2R2_REGULATOR_RETRY_COUNT))
+ goto again;
+ else if (ret < 0)
+ goto regulator_enable_failed;
+
+ clk_enable(core->b2r2_clock);
+ if (init_hw(core) < 0)
+ goto init_hw_failed;
+ core->domain_enabled = true;
+ }
+
+ mutex_unlock(&core->domain_lock);
+
+ return 0;
+
+init_hw_failed:
+ b2r2_log_err(core->dev,
+ "%s: Could not initialize hardware!\n", __func__);
+
+ clk_disable(core->b2r2_clock);
+
+ if (regulator_disable(core->b2r2_reg) < 0)
+ b2r2_log_err(core->dev, "%s: regulator_disable failed!\n",
+ __func__);
+
+regulator_enable_failed:
+ core->domain_request_count--;
+ mutex_unlock(&core->domain_lock);
+
+ return -EFAULT;
+}
+
+/**
+ * domain_disable()
+ *
+ * @core: The b2r2 core entity
+ */
+static void domain_disable(struct b2r2_core *core)
+{
+ mutex_lock(&core->domain_lock);
+
+ if (core->domain_request_count == 0) {
+ b2r2_log_err(core->dev,
+ "%s: Unbalanced domain_disable()\n", __func__);
+ } else {
+ core->domain_request_count--;
+
+ /* Cancel any existing work */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ /* Add a work to disable the power and clock after a delay */
+ queue_delayed_work(core->work_queue, &core->domain_disable_work,
+ B2R2_DOMAIN_DISABLE_TIMEOUT);
+ }
+
+ mutex_unlock(&core->domain_lock);
+}
+
+/**
+ * stop_queue() - Stops the specified queue.
+ */
+static void stop_queue(enum b2r2_core_queue queue)
+{
+ /*
+  * TODO: Implement! As long as this function is not implemented,
+  * cancelled jobs will keep using B2R2, which is a waste of resources.
+  * Not stopping jobs also skews the hardware timing: the job that the
+  * cancelled job interrupted (if any) will be billed for the time
+  * between the point where the job is cancelled and the point where
+  * it actually stops.
+  */
+}
+
+/**
+ * exit_job_list() - Empties a job queue by canceling the jobs
+ *
+ * @core: The b2r2 core entity
+ * @job_queue: The list of jobs to empty
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_queue)
+{
+ while (!list_empty(job_queue)) {
+ struct b2r2_core_job *job =
+ list_entry(job_queue->next,
+ struct b2r2_core_job,
+ list);
+ /* Add reference to prevent job from disappearing
+ in the middle of our work, released below */
+ internal_job_addref(core, job, __func__);
+
+ cancel_job(core, job);
+
+ /* Matching release to addref above */
+ internal_job_release(core, job, __func__);
+
+ }
+}
+
+/**
+ * get_next_job_id() - Return a new job id.
+ *
+ * @core: The b2r2 core entity
+ */
+static int get_next_job_id(struct b2r2_core *core)
+{
+ int job_id;
+
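+ /* Job ids stay positive: restart at 1 if the counter wraps */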
+ if (core->next_job_id < 1)
+ core->next_job_id = 1;
+ job_id = core->next_job_id++;
+
+ return job_id;
+}
+
+/**
+ * job_work_function() - Work queue function that calls callback(s) and
+ * checks if B2R2 can accept a new job
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core_job)
+ */
+static void job_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct b2r2_core_job *job =
+ container_of(ptr, struct b2r2_core_job, work);
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ /* Disable B2R2 */
+ domain_disable(core);
+
+ /* Release resources */
+ if (job->release_resources)
+ job->release_resources(job, false);
+
+ spin_lock_irqsave(&core->lock, flags);
+
+ /* Dispatch a new job if possible */
+ check_prio_list(core, false);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Tell the client */
+ if (job->callback)
+ job->callback(job);
+
+ /* Drop our reference, matches the
+ addref in handle_queue_event or b2r2_core_job_cancel */
+ b2r2_core_job_release(job, __func__);
+}
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * timeout_work_function() - Work queue function that checks for
+ *                           timed-out jobs. B2R2 might silently refuse
+ *                           to execute some jobs, e.g. SRC2 fill
+ *
+ * @ptr: Pointer to work struct (embedded in struct b2r2_core)
+ *
+ */
+static void timeout_work_function(struct work_struct *ptr)
+{
+ unsigned long flags;
+ struct list_head job_list;
+ struct delayed_work *twork = to_delayed_work(ptr);
+ struct b2r2_core *core = container_of(twork, struct b2r2_core,
+ timeout_work);
+
+ INIT_LIST_HEAD(&job_list);
+
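+ /*
+  * Stalled jobs are first moved to a temporary list under the lock;
+  * they are then marked as cancelled and their callbacks are
+  * dispatched via the work queue.
+  */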
+ /* Cancel all jobs if too much time has passed since the last irq */
+ spin_lock_irqsave(&core->lock, flags);
+ if (core->n_active_jobs > 0) {
+ unsigned long diff =
+ (long) jiffies - (long) core->jiffies_last_irq;
+ if (diff > HZ/2) {
+ /* Active jobs and more than half a second since last irq! */
+ int i;
+
+ /* Look for timed-out jobs and put them in a tmp list.
+ * It's important that the application queues are
+ * killed in order of decreasing priority */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job =
+ core->active_jobs[i];
+
+ if (job) {
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ list_add_tail(&job->list, &job_list);
+ }
+ }
+
+ /* Print the B2R2 register and reset B2R2 */
+ printk_regs(core);
+ hw_reset(core);
+ }
+ }
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Handle timed-out jobs */
+ spin_lock_irqsave(&core->lock, flags);
+ while (!list_empty(&job_list)) {
+ struct b2r2_core_job *job =
+ list_entry(job_list.next,
+ struct b2r2_core_job,
+ list);
+
+ b2r2_log_warn(core->dev, "%s: Job timeout\n", __func__);
+
+ list_del_init(&job->list);
+
+ /* Job is cancelled */
+ job->job_state = B2R2_CORE_JOB_CANCELED;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Job callbacks handled via work queue */
+ queue_work(core->work_queue, &job->work);
+ }
+
+ /* Requeue delayed work */
+ if (core->n_active_jobs)
+ queue_delayed_work(
+ core->work_queue,
+ &core->timeout_work, HZ/2);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+}
+#endif
+
+/**
+ * reset_hw_timer() - Resets a job's hardware timer. Must be called before
+ * the timer is used.
+ *
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void reset_hw_timer(struct b2r2_core_job *job)
+{
+ job->nsec_active_in_hw = 0;
+}
+
+/**
+ * start_hw_timer() - Times how long a job spends in hardware (active).
+ *                    Should be called immediately before starting the
+ * hardware.
+ *
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void start_hw_timer(struct b2r2_core_job *job)
+{
+ job->hw_start_time = b2r2_get_curr_nsec();
+}
+
+/**
+ * stop_hw_timer() - Times how long a job spends in hardware (active).
+ *                    Should be called immediately after the hardware has
+ * finished.
+ *
+ * @core: The b2r2 core entity
+ * @job: Pointer to job struct
+ *
+ * core->lock _must_ be held when calling this function
+ */
+static void stop_hw_timer(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ /*
+  * Assumes only application queues are used, which is the case right
+  * now.
+  *
+  * Not 100% accurate: when a higher prio job interrupts a lower prio
+  * job, it does so only after the current node of the lower prio job
+  * has finished. Since we currently cannot sense when the actual
+  * switch takes place, the time reported for a job that interrupts a
+  * lower prio job will on average also contain the time it takes to
+  * process half a node of the lower prio job, in addition to the time
+  * it takes to process the job's own nodes. This could possibly be
+  * solved by adding node notifications, but that would involve a
+  * significant amount of work and consume system resources due to the
+  * extra interrupts.
+  *
+  * If a job takes more than ~2 s (absolute time, including idling in
+  * the hardware), the state of the hardware timer will be corrupted
+  * and it will not report valid values until B2R2 becomes idle (no
+  * active jobs on any queue). The maximum length could possibly be
+  * increased by using 64-bit integers.
+  */
+
+ int i;
+
+ u32 stop_time_raw = b2r2_get_curr_nsec();
+ /*
+  * We add an offset to all positions in time so that the current time
+  * equals 0xFFFFFFFF. This way positions in time can be compared to
+  * each other without having to worry about wrapping (as long as all
+  * positions in time are in the past).
+  */
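+ /*
+  * Example: with stop_time_raw = 0x00000010 and hw_start_time =
+  * 0xFFFFFFF0 (the counter wrapped in between), time_pos_offset
+  * becomes 0xFFFFFFEF, the start maps to 0xFFFFFFDF and
+  * nsec_in_hw = 0xFFFFFFFF - 0xFFFFFFDF = 0x20, as expected.
+  */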
+ u32 stop_time = 0xFFFFFFFF;
+ u32 time_pos_offset = 0xFFFFFFFF - stop_time_raw;
+ u32 nsec_in_hw = stop_time - (job->hw_start_time + time_pos_offset);
+ job->nsec_active_in_hw += (s32)nsec_in_hw;
+
+ /* Check if we have delayed the start of higher prio jobs. Can happen as queue
+ switching only can be done between nodes. */
+ for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--) {
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job->hw_start_time = stop_time_raw;
+ }
+
+ /* Check if the job has stolen time from lower prio jobs */
+ for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++) {
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
+ u32 queue_active_job_hw_start_time;
+
+ if (NULL == queue_active_job)
+ continue;
+
+ queue_active_job_hw_start_time =
+ queue_active_job->hw_start_time +
+ time_pos_offset;
+
+ if (queue_active_job_hw_start_time < stop_time) {
+ u32 queue_active_job_nsec_in_hw = stop_time -
+ queue_active_job_hw_start_time;
+ u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw,
+ nsec_in_hw);
+
+ queue_active_job->nsec_active_in_hw -= (s32)num_stolen_nsec;
+
+ nsec_in_hw -= num_stolen_nsec;
+ stop_time -= num_stolen_nsec;
+ }
+
+ if (0 == nsec_in_hw)
+ break;
+ }
+}
+
+/**
+ * init_job() - Initializes a job structure from filled in client data.
+ * Reference count will be set to 1
+ *
+ * @job: Job to initialize
+ */
+static void init_job(struct b2r2_core_job *job)
+{
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ job->start_sentinel = START_SENTINEL;
+ job->end_sentinel = END_SENTINEL;
+
+ /* Get a job id*/
+ job->job_id = get_next_job_id(core);
+
+ /* Job is idle, never queued */
+ job->job_state = B2R2_CORE_JOB_IDLE;
+
+ /* Initialize internal data */
+ INIT_LIST_HEAD(&job->list);
+ init_waitqueue_head(&job->event);
+ INIT_WORK(&job->work, job_work_function);
+
+ /* Map given prio to B2R2 queues */
+ if (job->prio < B2R2_CORE_LOWEST_PRIO)
+ job->prio = B2R2_CORE_LOWEST_PRIO;
+ else if (job->prio > B2R2_CORE_HIGHEST_PRIO)
+ job->prio = B2R2_CORE_HIGHEST_PRIO;
+
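+ /*
+  * Priority to queue mapping:
+  *    prio >  10  ->  AQ1, hw prio 3
+  *    prio >   0  ->  AQ2, hw prio 2
+  *    prio > -10  ->  AQ3, hw prio 1
+  *    otherwise   ->  AQ4, hw prio 0
+  */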
+ if (job->prio > 10) {
+ job->queue = B2R2_CORE_QUEUE_AQ1;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ1_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_3);
+ } else if (job->prio > 0) {
+ job->queue = B2R2_CORE_QUEUE_AQ2;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ2_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_2);
+ } else if (job->prio > -10) {
+ job->queue = B2R2_CORE_QUEUE_AQ3;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ3_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_1);
+ } else {
+ job->queue = B2R2_CORE_QUEUE_AQ4;
+ job->interrupt_context =
+ (B2R2BLT_ITSAQ4_LNA_Reached);
+ job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_0);
+ }
+}
+
+/**
+ * clear_interrupts() - Disables all interrupts
+ *
+ * core->lock _must_ be held
+ */
+static void clear_interrupts(struct b2r2_core *core)
+{
+ writel(0x0, &core->hw->BLT_ITM0);
+ writel(0x0, &core->hw->BLT_ITM1);
+ writel(0x0, &core->hw->BLT_ITM2);
+ writel(0x0, &core->hw->BLT_ITM3);
+}
+
+/**
+ * insert_into_prio_list() - Inserts the job into the sorted list of jobs.
+ * The list is sorted by priority.
+ *
+ * @core: The b2r2 core entity
+ * @job: Job to insert
+ *
+ * core->lock _must_ be held
+ */
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job)
+{
+ /* Ref count is increased when job put in list,
+ should be released when job is removed from list */
+ internal_job_addref(core, job, __func__);
+
+ core->stat_n_jobs_in_prio_list++;
+
+ /* Sort in the job */
+ if (list_empty(&core->prio_queue))
+ list_add_tail(&job->list, &core->prio_queue);
+ else {
+ struct b2r2_core_job *first_job = list_entry(
+ core->prio_queue.next,
+ struct b2r2_core_job, list);
+ struct b2r2_core_job *last_job = list_entry(
+ core->prio_queue.prev,
+ struct b2r2_core_job, list);
+
+ if (job->prio > first_job->prio)
+ list_add(&job->list, &core->prio_queue);
+ else if (job->prio <= last_job->prio)
+ list_add_tail(&job->list, &core->prio_queue);
+ else {
+ /* We need to find where to put it */
+ struct list_head *ptr;
+
+ list_for_each(ptr, &core->prio_queue) {
+ struct b2r2_core_job *list_job =
+ list_entry(ptr, struct b2r2_core_job,
+ list);
+ if (job->prio > list_job->prio) {
+ list_add_tail(&job->list,
+ &list_job->list);
+ break;
+ }
+ }
+ }
+ }
+ /* The job is now queued */
+ job->job_state = B2R2_CORE_JOB_QUEUED;
+}
+
+/**
+ * check_prio_list() - Checks if the first job(s) in the prio list can
+ * be dispatched to B2R2
+ *
+ * @core: The b2r2 core entity
+ * @atomic: true if in atomic context (i.e. interrupt context)
+ *
+ * core->lock _must_ be held
+ */
+static void check_prio_list(struct b2r2_core *core, bool atomic)
+{
+ bool dispatched_job;
+ int n_dispatched = 0;
+ struct b2r2_core_job *job;
+
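+ /*
+  * Dispatch jobs from the head of the prio list until the list is
+  * empty, the head job's B2R2 queue is busy or its resources cannot
+  * be acquired.
+  */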
+ do {
+ dispatched_job = false;
+
+ /* Do we have anything in our prio list? */
+ if (list_empty(&core->prio_queue))
+ break;
+
+ /* The first job waiting */
+ job = list_first_entry(&core->prio_queue,
+ struct b2r2_core_job, list);
+
+ /* Is the B2R2 queue available? */
+ if (core->active_jobs[job->queue] != NULL)
+ break;
+
+ /* Can we acquire resources? */
+ if (!job->acquire_resources ||
+ job->acquire_resources(job, atomic) == 0) {
+ /* Ok to dispatch job */
+
+ /* Remove from list */
+ list_del_init(&job->list);
+
+ /* The job is now active */
+ core->active_jobs[job->queue] = job;
+ core->n_active_jobs++;
+ job->jiffies = jiffies;
+ core->jiffies_last_active = jiffies;
+
+ /* Kick off B2R2 */
+ trigger_job(core, job);
+ dispatched_job = true;
+ n_dispatched++;
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ /* Check in one half second if it hangs */
+ queue_delayed_work(core->work_queue,
+ &core->timeout_work, HZ/2);
+#endif
+ } else {
+ /* No resources */
+ if (!atomic && core->n_active_jobs == 0) {
+ b2r2_log_warn(core->dev,
+ "%s: No resource", __func__);
+ cancel_job(core, job);
+ }
+ }
+ } while (dispatched_job);
+
+ core->stat_n_jobs_in_prio_list -= n_dispatched;
+}
+
+/**
+ * find_job_in_list() - Finds job with job_id in list
+ *
+ * @job_id: Job id to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock _must_ be held
+ */
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job = list_entry(
+ ptr, struct b2r2_core_job, list);
+ if (job->job_id == job_id) {
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_core *core = instance->control->data;
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(core, job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_job_in_active_jobs() - Finds job in active job queues
+ *
+ * @core: The b2r2 core entity
+ * @job_id: Job id to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock _must_ be held
+ */
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
+
+ if (job && job->job_id == job_id) {
+ internal_job_addref(core, job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+/**
+ * find_tag_in_list() - Finds first job with tag in list
+ *
+ * @core: The b2r2 core entity
+ * @tag: Tag to find
+ * @list: List to find job id in
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list)
+{
+ struct list_head *ptr;
+
+ list_for_each(ptr, list) {
+ struct b2r2_core_job *job =
+ list_entry(ptr, struct b2r2_core_job, list);
+ if (job->tag == tag) {
+ /* Increase reference count, should be released by
+ the caller of b2r2_core_job_find */
+ internal_job_addref(core, job, __func__);
+ return job;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * find_tag_in_active_jobs() - Finds job with tag in active job queues
+ *
+ * @core: The b2r2 core entity
+ * @tag: Tag to find
+ *
+ * Reference count will be incremented for found job.
+ *
+ * core->lock must be held
+ */
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag)
+{
+ int i;
+ struct b2r2_core_job *found_job = NULL;
+
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
+
+ if (job && job->tag == tag) {
+ internal_job_addref(core, job, __func__);
+ found_job = job;
+ break;
+ }
+ }
+ }
+ return found_job;
+}
+
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * hw_reset() - Resets B2R2 hardware
+ *
+ * core->lock must be held
+ */
+static int hw_reset(struct b2r2_core *core)
+{
+ u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
+
+ /* Tell B2R2 to reset */
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+ writel(0x00000000, &core->hw->BLT_CTL);
+
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
+
+ /* Wait for B2R2 to be idle (on a timeout rather than a while loop) */
+ while ((uTimeOut > 0) &&
+ ((readl(&core->hw->BLT_STA1) &
+ B2R2BLT_STA1BDISP_IDLE) == 0x0))
+ uTimeOut--;
+
+ if (uTimeOut == 0) {
+ b2r2_log_warn(core->dev,
+ "error-> after software reset B2R2 is not idle\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+
+}
+#endif
+
+/**
+ * trigger_job() - Put job in B2R2 HW queue
+ *
+ * @core: The b2r2 core entity
+ * @job: Job to trigger
+ *
+ * core->lock must be held
+ */
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job)
+{
+ /* Debug prints */
+ b2r2_log_info(core->dev, "queue 0x%x\n", job->queue);
+ b2r2_log_info(core->dev, "BLT TRIG_IP 0x%x (first node)\n",
+ job->first_node_address);
+ b2r2_log_info(core->dev, "BLT LNA_CTL 0x%x (last node)\n",
+ job->last_node_address);
+ b2r2_log_info(core->dev, "BLT TRIG_CTL 0x%x\n", job->control);
+ b2r2_log_info(core->dev, "BLT PACE_CTL 0x%x\n", job->pace_control);
+
+ reset_hw_timer(job);
+ job->job_state = B2R2_CORE_JOB_RUNNING;
+
+ /* Enable interrupt */
+ writel(readl(&core->hw->BLT_ITM0) | job->interrupt_context,
+ &core->hw->BLT_ITM0);
+
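+ /*
+  * Configure the bus plugs; the requested sizes are clamped to the
+  * ranges the hardware accepts before being written.
+  */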
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS1_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS1_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS1_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS1_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS2_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS2_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS2_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS2_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS3_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS3_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS3_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS3_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGT_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGT_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGT_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGT_PGZ);
+
+ /* B2R2 kicks off when LNA is written, LNA write must be last! */
+ switch (job->queue) {
+ case B2R2_CORE_QUEUE_CQ1:
+ writel(job->first_node_address, &core->hw->BLT_CQ1_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ1_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ1_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_CQ2:
+ writel(job->first_node_address, &core->hw->BLT_CQ2_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ2_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ2_PACE_CTL);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ1:
+ writel(job->control, &core->hw->BLT_AQ1_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ1_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ1_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ2:
+ writel(job->control, &core->hw->BLT_AQ2_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ2_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ2_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ3:
+ writel(job->control, &core->hw->BLT_AQ3_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ3_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ3_LNA);
+ break;
+
+ case B2R2_CORE_QUEUE_AQ4:
+ writel(job->control, &core->hw->BLT_AQ4_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ4_IP);
+ wmb();
+ start_hw_timer(job);
+ writel(job->last_node_address, &core->hw->BLT_AQ4_LNA);
+ break;
+
+ /* Handle the default case */
+ default:
+ break;
+
+ } /* end switch */
+
+}
+
+/**
+ * handle_queue_event() - Handles interrupt event for specified B2R2 queue
+ *
+ * @core: The b2r2 core entity
+ * @queue: Queue to handle event for
+ *
+ * core->lock must be held
+ */
+static void handle_queue_event(struct b2r2_core *core,
+ enum b2r2_core_queue queue)
+{
+ struct b2r2_core_job *job;
+
+ job = core->active_jobs[queue];
+ if (job) {
+ if (job->job_state != B2R2_CORE_JOB_RUNNING)
+ /* Should be running; if not, this is a
+    severe error. TBD */
+ b2r2_log_warn(core->dev,
+ "%s: Job is not running", __func__);
+
+ stop_hw_timer(core, job);
+
+ /* Remove from queue */
+ BUG_ON(core->n_active_jobs == 0);
+ core->active_jobs[queue] = NULL;
+ core->n_active_jobs--;
+ }
+
+ if (!job) {
+ /* No job, error? */
+ b2r2_log_warn(core->dev, "%s: No job", __func__);
+ return;
+ }
+
+ /* Atomic context release resources, release resources will
+ be called again later from process context (work queue) */
+ if (job->release_resources)
+ job->release_resources(job, true);
+
+ /* Job is done */
+ job->job_state = B2R2_CORE_JOB_DONE;
+
+ /* Handle done */
+ wake_up_interruptible(&job->event);
+
+ /* Dispatch to work queue to handle callbacks */
+ queue_work(core->work_queue, &job->work);
+}
+
+/**
+ * process_events() - Handles interrupt events
+ *
+ * @core: The b2r2 core entity
+ * @status: Contents of the B2R2 ITS register
+ */
+static void process_events(struct b2r2_core *core, u32 status)
+{
+ u32 mask = 0xF;
+ u32 disable_itm_mask = 0;
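+
+ /*
+  * Each queue owns four consecutive status bits in BLT_ITS: CQ1 at
+  * bits 0-3, CQ2 at bits 4-7 and AQ1-AQ4 from bit 12 upwards (the
+  * extra '<<= 8' below skips bits 8-11).
+  */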
+
+ b2r2_log_info(core->dev, "Enters process_events\n");
+ b2r2_log_info(core->dev, "status 0x%x\n", status);
+
+ /* Composition queue 1 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Composition queue 2 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 8;
+
+ /* Application queue 1 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ1);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 2 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ2);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 3 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ3);
+ disable_itm_mask |= mask;
+ }
+ mask <<= 4;
+
+ /* Application queue 4 */
+ if (status & mask) {
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ4);
+ disable_itm_mask |= mask;
+ }
+
+ /* Clear received interrupt flags */
+ writel(status, &core->hw->BLT_ITS);
+ /* Disable handled interrupts */
+ writel(readl(&core->hw->BLT_ITM0) & ~disable_itm_mask,
+ &core->hw->BLT_ITM0);
+
+ b2r2_log_info(core->dev, "Returns process_events\n");
+}
+
+/**
+ * b2r2_irq_handler() - B2R2 interrupt handler
+ *
+ * @irq: Interrupt number (not used)
+ * @x: The b2r2 core entity, as passed to request_irq()
+ */
+static irqreturn_t b2r2_irq_handler(int irq, void *x)
+{
+ unsigned long flags;
+ struct b2r2_core *core = (struct b2r2_core *) x;
+
+ /* Spin lock is needed in the irq handler (SMP) */
+ spin_lock_irqsave(&core->lock, flags);
+
+ /*
+  * The clock is guaranteed to be on here: it is requested in
+  * b2r2_core_job_add() and not released until the job's work
+  * function has run (see domain_enable/domain_disable).
+  */
+
+ /* Remember time for last irq (for timeout mgmt) */
+ core->jiffies_last_irq = jiffies;
+ core->stat_n_irq++;
+
+ /* Handle the interrupt(s) */
+ process_events(core, readl(&core->hw->BLT_ITS));
+
+ /* Check if we can dispatch new jobs */
+ check_prio_list(core, true);
+
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * struct debugfs_reg - Represents one B2R2 register in debugfs
+ *
+ * @name: Register name
+ * @offset: Byte offset in B2R2 for register
+ */
+struct debugfs_reg {
+ const char name[30];
+ u32 offset;
+};
+
+/**
+ * debugfs_regs - Array of B2R2 debugfs registers
+ */
+static const struct debugfs_reg debugfs_regs[] = {
+ {"BLT_SSBA17", offsetof(struct b2r2_memory_map, BLT_SSBA17)},
+ {"BLT_SSBA18", offsetof(struct b2r2_memory_map, BLT_SSBA18)},
+ {"BLT_SSBA19", offsetof(struct b2r2_memory_map, BLT_SSBA19)},
+ {"BLT_SSBA20", offsetof(struct b2r2_memory_map, BLT_SSBA20)},
+ {"BLT_SSBA21", offsetof(struct b2r2_memory_map, BLT_SSBA21)},
+ {"BLT_SSBA22", offsetof(struct b2r2_memory_map, BLT_SSBA22)},
+ {"BLT_SSBA23", offsetof(struct b2r2_memory_map, BLT_SSBA23)},
+ {"BLT_SSBA24", offsetof(struct b2r2_memory_map, BLT_SSBA24)},
+ {"BLT_STBA5", offsetof(struct b2r2_memory_map, BLT_STBA5)},
+ {"BLT_STBA6", offsetof(struct b2r2_memory_map, BLT_STBA6)},
+ {"BLT_STBA7", offsetof(struct b2r2_memory_map, BLT_STBA7)},
+ {"BLT_STBA8", offsetof(struct b2r2_memory_map, BLT_STBA8)},
+ {"BLT_CTL", offsetof(struct b2r2_memory_map, BLT_CTL)},
+ {"BLT_ITS", offsetof(struct b2r2_memory_map, BLT_ITS)},
+ {"BLT_STA1", offsetof(struct b2r2_memory_map, BLT_STA1)},
+ {"BLT_SSBA1", offsetof(struct b2r2_memory_map, BLT_SSBA1)},
+ {"BLT_SSBA2", offsetof(struct b2r2_memory_map, BLT_SSBA2)},
+ {"BLT_SSBA3", offsetof(struct b2r2_memory_map, BLT_SSBA3)},
+ {"BLT_SSBA4", offsetof(struct b2r2_memory_map, BLT_SSBA4)},
+ {"BLT_SSBA5", offsetof(struct b2r2_memory_map, BLT_SSBA5)},
+ {"BLT_SSBA6", offsetof(struct b2r2_memory_map, BLT_SSBA6)},
+ {"BLT_SSBA7", offsetof(struct b2r2_memory_map, BLT_SSBA7)},
+ {"BLT_SSBA8", offsetof(struct b2r2_memory_map, BLT_SSBA8)},
+ {"BLT_STBA1", offsetof(struct b2r2_memory_map, BLT_STBA1)},
+ {"BLT_STBA2", offsetof(struct b2r2_memory_map, BLT_STBA2)},
+ {"BLT_STBA3", offsetof(struct b2r2_memory_map, BLT_STBA3)},
+ {"BLT_STBA4", offsetof(struct b2r2_memory_map, BLT_STBA4)},
+ {"BLT_CQ1_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_TRIG_IP)},
+ {"BLT_CQ1_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_TRIG_CTL)},
+ {"BLT_CQ1_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ1_PACE_CTL)},
+ {"BLT_CQ1_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_IP)},
+ {"BLT_CQ2_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_TRIG_IP)},
+ {"BLT_CQ2_TRIG_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_TRIG_CTL)},
+ {"BLT_CQ2_PACE_CTL", offsetof(struct b2r2_memory_map,
+ BLT_CQ2_PACE_CTL)},
+ {"BLT_CQ2_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_IP)},
+ {"BLT_AQ1_CTL", offsetof(struct b2r2_memory_map, BLT_AQ1_CTL)},
+ {"BLT_AQ1_IP", offsetof(struct b2r2_memory_map, BLT_AQ1_IP)},
+ {"BLT_AQ1_LNA", offsetof(struct b2r2_memory_map, BLT_AQ1_LNA)},
+ {"BLT_AQ1_STA", offsetof(struct b2r2_memory_map, BLT_AQ1_STA)},
+ {"BLT_AQ2_CTL", offsetof(struct b2r2_memory_map, BLT_AQ2_CTL)},
+ {"BLT_AQ2_IP", offsetof(struct b2r2_memory_map, BLT_AQ2_IP)},
+ {"BLT_AQ2_LNA", offsetof(struct b2r2_memory_map, BLT_AQ2_LNA)},
+ {"BLT_AQ2_STA", offsetof(struct b2r2_memory_map, BLT_AQ2_STA)},
+ {"BLT_AQ3_CTL", offsetof(struct b2r2_memory_map, BLT_AQ3_CTL)},
+ {"BLT_AQ3_IP", offsetof(struct b2r2_memory_map, BLT_AQ3_IP)},
+ {"BLT_AQ3_LNA", offsetof(struct b2r2_memory_map, BLT_AQ3_LNA)},
+ {"BLT_AQ3_STA", offsetof(struct b2r2_memory_map, BLT_AQ3_STA)},
+ {"BLT_AQ4_CTL", offsetof(struct b2r2_memory_map, BLT_AQ4_CTL)},
+ {"BLT_AQ4_IP", offsetof(struct b2r2_memory_map, BLT_AQ4_IP)},
+ {"BLT_AQ4_LNA", offsetof(struct b2r2_memory_map, BLT_AQ4_LNA)},
+ {"BLT_AQ4_STA", offsetof(struct b2r2_memory_map, BLT_AQ4_STA)},
+ {"BLT_SSBA9", offsetof(struct b2r2_memory_map, BLT_SSBA9)},
+ {"BLT_SSBA10", offsetof(struct b2r2_memory_map, BLT_SSBA10)},
+ {"BLT_SSBA11", offsetof(struct b2r2_memory_map, BLT_SSBA11)},
+ {"BLT_SSBA12", offsetof(struct b2r2_memory_map, BLT_SSBA12)},
+ {"BLT_SSBA13", offsetof(struct b2r2_memory_map, BLT_SSBA13)},
+ {"BLT_SSBA14", offsetof(struct b2r2_memory_map, BLT_SSBA14)},
+ {"BLT_SSBA15", offsetof(struct b2r2_memory_map, BLT_SSBA15)},
+ {"BLT_SSBA16", offsetof(struct b2r2_memory_map, BLT_SSBA16)},
+ {"BLT_SGA1", offsetof(struct b2r2_memory_map, BLT_SGA1)},
+ {"BLT_SGA2", offsetof(struct b2r2_memory_map, BLT_SGA2)},
+ {"BLT_ITM0", offsetof(struct b2r2_memory_map, BLT_ITM0)},
+ {"BLT_ITM1", offsetof(struct b2r2_memory_map, BLT_ITM1)},
+ {"BLT_ITM2", offsetof(struct b2r2_memory_map, BLT_ITM2)},
+ {"BLT_ITM3", offsetof(struct b2r2_memory_map, BLT_ITM3)},
+ {"BLT_DFV2", offsetof(struct b2r2_memory_map, BLT_DFV2)},
+ {"BLT_DFV1", offsetof(struct b2r2_memory_map, BLT_DFV1)},
+ {"BLT_PRI", offsetof(struct b2r2_memory_map, BLT_PRI)},
+ {"PLUGS1_OP2", offsetof(struct b2r2_memory_map, PLUGS1_OP2)},
+ {"PLUGS1_CHZ", offsetof(struct b2r2_memory_map, PLUGS1_CHZ)},
+ {"PLUGS1_MSZ", offsetof(struct b2r2_memory_map, PLUGS1_MSZ)},
+ {"PLUGS1_PGZ", offsetof(struct b2r2_memory_map, PLUGS1_PGZ)},
+ {"PLUGS2_OP2", offsetof(struct b2r2_memory_map, PLUGS2_OP2)},
+ {"PLUGS2_CHZ", offsetof(struct b2r2_memory_map, PLUGS2_CHZ)},
+ {"PLUGS2_MSZ", offsetof(struct b2r2_memory_map, PLUGS2_MSZ)},
+ {"PLUGS2_PGZ", offsetof(struct b2r2_memory_map, PLUGS2_PGZ)},
+ {"PLUGS3_OP2", offsetof(struct b2r2_memory_map, PLUGS3_OP2)},
+ {"PLUGS3_CHZ", offsetof(struct b2r2_memory_map, PLUGS3_CHZ)},
+ {"PLUGS3_MSZ", offsetof(struct b2r2_memory_map, PLUGS3_MSZ)},
+ {"PLUGS3_PGZ", offsetof(struct b2r2_memory_map, PLUGS3_PGZ)},
+ {"PLUGT_OP2", offsetof(struct b2r2_memory_map, PLUGT_OP2)},
+ {"PLUGT_CHZ", offsetof(struct b2r2_memory_map, PLUGT_CHZ)},
+ {"PLUGT_MSZ", offsetof(struct b2r2_memory_map, PLUGT_MSZ)},
+ {"PLUGT_PGZ", offsetof(struct b2r2_memory_map, PLUGT_PGZ)},
+ {"BLT_NIP", offsetof(struct b2r2_memory_map, BLT_NIP)},
+ {"BLT_CIC", offsetof(struct b2r2_memory_map, BLT_CIC)},
+ {"BLT_INS", offsetof(struct b2r2_memory_map, BLT_INS)},
+ {"BLT_ACK", offsetof(struct b2r2_memory_map, BLT_ACK)},
+ {"BLT_TBA", offsetof(struct b2r2_memory_map, BLT_TBA)},
+ {"BLT_TTY", offsetof(struct b2r2_memory_map, BLT_TTY)},
+ {"BLT_TXY", offsetof(struct b2r2_memory_map, BLT_TXY)},
+ {"BLT_TSZ", offsetof(struct b2r2_memory_map, BLT_TSZ)},
+ {"BLT_S1CF", offsetof(struct b2r2_memory_map, BLT_S1CF)},
+ {"BLT_S2CF", offsetof(struct b2r2_memory_map, BLT_S2CF)},
+ {"BLT_S1BA", offsetof(struct b2r2_memory_map, BLT_S1BA)},
+ {"BLT_S1TY", offsetof(struct b2r2_memory_map, BLT_S1TY)},
+ {"BLT_S1XY", offsetof(struct b2r2_memory_map, BLT_S1XY)},
+ {"BLT_S2BA", offsetof(struct b2r2_memory_map, BLT_S2BA)},
+ {"BLT_S2TY", offsetof(struct b2r2_memory_map, BLT_S2TY)},
+ {"BLT_S2XY", offsetof(struct b2r2_memory_map, BLT_S2XY)},
+ {"BLT_S2SZ", offsetof(struct b2r2_memory_map, BLT_S2SZ)},
+ {"BLT_S3BA", offsetof(struct b2r2_memory_map, BLT_S3BA)},
+ {"BLT_S3TY", offsetof(struct b2r2_memory_map, BLT_S3TY)},
+ {"BLT_S3XY", offsetof(struct b2r2_memory_map, BLT_S3XY)},
+ {"BLT_S3SZ", offsetof(struct b2r2_memory_map, BLT_S3SZ)},
+ {"BLT_CWO", offsetof(struct b2r2_memory_map, BLT_CWO)},
+ {"BLT_CWS", offsetof(struct b2r2_memory_map, BLT_CWS)},
+ {"BLT_CCO", offsetof(struct b2r2_memory_map, BLT_CCO)},
+ {"BLT_CML", offsetof(struct b2r2_memory_map, BLT_CML)},
+ {"BLT_FCTL", offsetof(struct b2r2_memory_map, BLT_FCTL)},
+ {"BLT_PMK", offsetof(struct b2r2_memory_map, BLT_PMK)},
+ {"BLT_RSF", offsetof(struct b2r2_memory_map, BLT_RSF)},
+ {"BLT_RZI", offsetof(struct b2r2_memory_map, BLT_RZI)},
+ {"BLT_HFP", offsetof(struct b2r2_memory_map, BLT_HFP)},
+ {"BLT_VFP", offsetof(struct b2r2_memory_map, BLT_VFP)},
+ {"BLT_Y_RSF", offsetof(struct b2r2_memory_map, BLT_Y_RSF)},
+ {"BLT_Y_RZI", offsetof(struct b2r2_memory_map, BLT_Y_RZI)},
+ {"BLT_Y_HFP", offsetof(struct b2r2_memory_map, BLT_Y_HFP)},
+ {"BLT_Y_VFP", offsetof(struct b2r2_memory_map, BLT_Y_VFP)},
+ {"BLT_KEY1", offsetof(struct b2r2_memory_map, BLT_KEY1)},
+ {"BLT_KEY2", offsetof(struct b2r2_memory_map, BLT_KEY2)},
+ {"BLT_SAR", offsetof(struct b2r2_memory_map, BLT_SAR)},
+ {"BLT_USR", offsetof(struct b2r2_memory_map, BLT_USR)},
+ {"BLT_IVMX0", offsetof(struct b2r2_memory_map, BLT_IVMX0)},
+ {"BLT_IVMX1", offsetof(struct b2r2_memory_map, BLT_IVMX1)},
+ {"BLT_IVMX2", offsetof(struct b2r2_memory_map, BLT_IVMX2)},
+ {"BLT_IVMX3", offsetof(struct b2r2_memory_map, BLT_IVMX3)},
+ {"BLT_OVMX0", offsetof(struct b2r2_memory_map, BLT_OVMX0)},
+ {"BLT_OVMX1", offsetof(struct b2r2_memory_map, BLT_OVMX1)},
+ {"BLT_OVMX2", offsetof(struct b2r2_memory_map, BLT_OVMX2)},
+ {"BLT_OVMX3", offsetof(struct b2r2_memory_map, BLT_OVMX3)},
+ {"BLT_VC1R", offsetof(struct b2r2_memory_map, BLT_VC1R)},
+ {"BLT_Y_HFC0", offsetof(struct b2r2_memory_map, BLT_Y_HFC0)},
+ {"BLT_Y_HFC1", offsetof(struct b2r2_memory_map, BLT_Y_HFC1)},
+ {"BLT_Y_HFC2", offsetof(struct b2r2_memory_map, BLT_Y_HFC2)},
+ {"BLT_Y_HFC3", offsetof(struct b2r2_memory_map, BLT_Y_HFC3)},
+ {"BLT_Y_HFC4", offsetof(struct b2r2_memory_map, BLT_Y_HFC4)},
+ {"BLT_Y_HFC5", offsetof(struct b2r2_memory_map, BLT_Y_HFC5)},
+ {"BLT_Y_HFC6", offsetof(struct b2r2_memory_map, BLT_Y_HFC6)},
+ {"BLT_Y_HFC7", offsetof(struct b2r2_memory_map, BLT_Y_HFC7)},
+ {"BLT_Y_HFC8", offsetof(struct b2r2_memory_map, BLT_Y_HFC8)},
+ {"BLT_Y_HFC9", offsetof(struct b2r2_memory_map, BLT_Y_HFC9)},
+ {"BLT_Y_HFC10", offsetof(struct b2r2_memory_map, BLT_Y_HFC10)},
+ {"BLT_Y_HFC11", offsetof(struct b2r2_memory_map, BLT_Y_HFC11)},
+ {"BLT_Y_HFC12", offsetof(struct b2r2_memory_map, BLT_Y_HFC12)},
+ {"BLT_Y_HFC13", offsetof(struct b2r2_memory_map, BLT_Y_HFC13)},
+ {"BLT_Y_HFC14", offsetof(struct b2r2_memory_map, BLT_Y_HFC14)},
+ {"BLT_Y_HFC15", offsetof(struct b2r2_memory_map, BLT_Y_HFC15)},
+ {"BLT_Y_VFC0", offsetof(struct b2r2_memory_map, BLT_Y_VFC0)},
+ {"BLT_Y_VFC1", offsetof(struct b2r2_memory_map, BLT_Y_VFC1)},
+ {"BLT_Y_VFC2", offsetof(struct b2r2_memory_map, BLT_Y_VFC2)},
+ {"BLT_Y_VFC3", offsetof(struct b2r2_memory_map, BLT_Y_VFC3)},
+ {"BLT_Y_VFC4", offsetof(struct b2r2_memory_map, BLT_Y_VFC4)},
+ {"BLT_Y_VFC5", offsetof(struct b2r2_memory_map, BLT_Y_VFC5)},
+ {"BLT_Y_VFC6", offsetof(struct b2r2_memory_map, BLT_Y_VFC6)},
+ {"BLT_Y_VFC7", offsetof(struct b2r2_memory_map, BLT_Y_VFC7)},
+ {"BLT_Y_VFC8", offsetof(struct b2r2_memory_map, BLT_Y_VFC8)},
+ {"BLT_Y_VFC9", offsetof(struct b2r2_memory_map, BLT_Y_VFC9)},
+ {"BLT_HFC0", offsetof(struct b2r2_memory_map, BLT_HFC0)},
+ {"BLT_HFC1", offsetof(struct b2r2_memory_map, BLT_HFC1)},
+ {"BLT_HFC2", offsetof(struct b2r2_memory_map, BLT_HFC2)},
+ {"BLT_HFC3", offsetof(struct b2r2_memory_map, BLT_HFC3)},
+ {"BLT_HFC4", offsetof(struct b2r2_memory_map, BLT_HFC4)},
+ {"BLT_HFC5", offsetof(struct b2r2_memory_map, BLT_HFC5)},
+ {"BLT_HFC6", offsetof(struct b2r2_memory_map, BLT_HFC6)},
+ {"BLT_HFC7", offsetof(struct b2r2_memory_map, BLT_HFC7)},
+ {"BLT_HFC8", offsetof(struct b2r2_memory_map, BLT_HFC8)},
+ {"BLT_HFC9", offsetof(struct b2r2_memory_map, BLT_HFC9)},
+ {"BLT_HFC10", offsetof(struct b2r2_memory_map, BLT_HFC10)},
+ {"BLT_HFC11", offsetof(struct b2r2_memory_map, BLT_HFC11)},
+ {"BLT_HFC12", offsetof(struct b2r2_memory_map, BLT_HFC12)},
+ {"BLT_HFC13", offsetof(struct b2r2_memory_map, BLT_HFC13)},
+ {"BLT_HFC14", offsetof(struct b2r2_memory_map, BLT_HFC14)},
+ {"BLT_HFC15", offsetof(struct b2r2_memory_map, BLT_HFC15)},
+ {"BLT_VFC0", offsetof(struct b2r2_memory_map, BLT_VFC0)},
+ {"BLT_VFC1", offsetof(struct b2r2_memory_map, BLT_VFC1)},
+ {"BLT_VFC2", offsetof(struct b2r2_memory_map, BLT_VFC2)},
+ {"BLT_VFC3", offsetof(struct b2r2_memory_map, BLT_VFC3)},
+ {"BLT_VFC4", offsetof(struct b2r2_memory_map, BLT_VFC4)},
+ {"BLT_VFC5", offsetof(struct b2r2_memory_map, BLT_VFC5)},
+ {"BLT_VFC6", offsetof(struct b2r2_memory_map, BLT_VFC6)},
+ {"BLT_VFC7", offsetof(struct b2r2_memory_map, BLT_VFC7)},
+ {"BLT_VFC8", offsetof(struct b2r2_memory_map, BLT_VFC8)},
+ {"BLT_VFC9", offsetof(struct b2r2_memory_map, BLT_VFC9)},
+};
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+/**
+ * printk_regs() - Print B2R2 registers to printk
+ */
+static void printk_regs(struct b2r2_core *core)
+{
+#ifdef CONFIG_B2R2_DEBUG
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value = readl(
+ (unsigned long *) (((u8 *) core->hw) +
+ debugfs_regs[i].offset));
+ b2r2_log_regdump(core->dev, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+#endif
+}
+#endif
+
+/**
+ * debugfs_b2r2_reg_read() - Implements debugfs read for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t debugfs_b2r2_reg_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size;
+ int ret = 0;
+ unsigned long value;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Read from B2R2 */
+ value = readl((unsigned long *)
+ filp->f_dentry->d_inode->i_private);
+
+ /* Build the string */
+ dev_size = sprintf(Buf, "%8lX\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ /* Return it to user space */
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_write() - Implements debugfs write for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static ssize_t debugfs_b2r2_reg_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char Buf[80];
+ u32 reg_value;
+ int ret = 0;
+
+ /* Adjust count */
+ if (count >= sizeof(Buf))
+ count = sizeof(Buf) - 1;
+ /* Get it from user space */
+ if (copy_from_user(Buf, buf, count))
+ return -EINVAL;
+ Buf[count] = 0;
+ /* Convert from hex string */
+ if (sscanf(Buf, "%8lX", (unsigned long *) &reg_value) != 1)
+ return -EINVAL;
+
+ writel(reg_value, (u32 *)
+ filp->f_dentry->d_inode->i_private);
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_fops() - File operations for B2R2 register debugfs
+ */
+static const struct file_operations debugfs_b2r2_reg_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_reg_read,
+ .write = debugfs_b2r2_reg_write,
+};
+
+/**
+ * debugfs_b2r2_regs_read() - Implements debugfs read for B2R2 register dump
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t debugfs_b2r2_regs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a giant string containing all registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ unsigned long value =
+ readl((u32 *) (((u8 *)
+ filp->f_dentry->d_inode->i_private) +
+ debugfs_regs[i].offset));
+ dev_size += sprintf(Buf + dev_size, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
+ }
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_regs_fops() - File operations for B2R2 register dump debugfs
+ */
+static const struct file_operations debugfs_b2r2_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_regs_read,
+};
+
+/**
+ * debugfs_b2r2_stat_read() - Implements debugfs read for B2R2 statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t debugfs_b2r2_stat_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ size_t dev_size = 0;
+ int ret = 0;
+ int i = 0;
+ char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ if (Buf == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Build a string containing all statistics */
+ dev_size += sprintf(Buf + dev_size, "Interrupts : %lu\n",
+ core->stat_n_irq);
+ dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n",
+ core->stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Removed jobs : %lu\n",
+ core->stat_n_jobs_removed);
+ dev_size += sprintf(Buf + dev_size, "Jobs in prio list : %lu\n",
+ core->stat_n_jobs_in_prio_list);
+ dev_size += sprintf(Buf + dev_size, "Active jobs : %lu\n",
+ core->n_active_jobs);
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++)
+ dev_size += sprintf(Buf + dev_size,
+ " Job in queue %d : 0x%08lx\n",
+ i, (unsigned long) core->active_jobs[i]);
+ dev_size += sprintf(Buf + dev_size, "Clock requests : %lu\n",
+ core->clock_request_count);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ kfree(Buf);
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_stat_fops() - File operations for B2R2 statistics debugfs
+ */
+static const struct file_operations debugfs_b2r2_stat_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_stat_read,
+};
+
+
+/**
+ * debugfs_b2r2_clock_read() - Implements debugfs read for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static ssize_t debugfs_b2r2_clock_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ /* 10 characters hex number + newline + string terminator; */
+ char Buf[10+2];
+ size_t dev_size;
+ int ret = 0;
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ unsigned long value = clk_get_rate(core->b2r2_clock);
+
+ dev_size = sprintf(Buf, "%#010lx\n", value);
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, Buf, count)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *f_pos += count;
+ ret = count;
+
+out:
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_write() - Implements debugfs write for
+ * PMU B2R2 clock register
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static ssize_t debugfs_b2r2_clock_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char Buf[80];
+ u32 reg_value;
+ int ret = 0;
+
+ if (count >= sizeof(Buf))
+ count = sizeof(Buf) - 1;
+ if (copy_from_user(Buf, buf, count))
+ return -EINVAL;
+ Buf[count] = 0;
+ if (sscanf(Buf, "%8lX", (unsigned long *) &reg_value) != 1)
+ return -EINVAL;
+
+ /* Not working yet: */
+ /* clk_set_rate(b2r2_core.b2r2_clock, (unsigned long) reg_value); */
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_clock_fops() - File operations for PMU B2R2 clock debugfs
+ */
+static const struct file_operations debugfs_b2r2_clock_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_clock_read,
+ .write = debugfs_b2r2_clock_write,
+};
+
+#endif
+
+/**
+ * init_hw() - B2R2 hardware reset & initialize
+ *
+ * @core: The b2r2 core entity
+ *
+ * 1) Register the interrupt handler
+ *
+ * 2) B2R2 register map
+ *
+ * 3) To reset the B2R2 hardware, write B2R2BLT_CTLGLOBAL_soft_reset to
+ *    the B2R2 control register, then poll the B2R2 status register for
+ *    the B2R2BLT_STA1BDISP_IDLE flag
+ *
+ * 4) Wait for the B2R2 hardware to be idle (on a timeout rather than a
+ *    while loop)
+ *
+ * 5) Reset the driver status
+ *
+ * 6) Recover from any error without any leaks
+ */
+static int init_hw(struct b2r2_core *core)
+{
+ int result = 0;
+	u32 timeout = B2R2_DRIVER_TIMEOUT_VALUE;
+
+ /* Put B2R2 into reset */
+ clear_interrupts(core);
+
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+
+ /* Set up interrupt handler */
+ result = request_irq(core->irq, b2r2_irq_handler, 0,
+ "b2r2-interrupt", core);
+ if (result) {
+ b2r2_log_err(core->dev,
+ "%s: failed to register IRQ for B2R2\n", __func__);
+ goto b2r2_init_request_irq_failed;
+ }
+
+ b2r2_log_info(core->dev, "do a global reset..\n");
+
+ /* Release reset */
+ writel(0x00000000, &core->hw->BLT_CTL);
+
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
+
+	/* Wait for B2R2 to be idle (on a timeout rather than while loop) */
+	while ((timeout > 0) &&
+			((readl(&core->hw->BLT_STA1) &
+			B2R2BLT_STA1BDISP_IDLE) == 0x0))
+		timeout--;
+	if (timeout == 0) {
+ b2r2_log_err(core->dev,
+ "%s: B2R2 not idle after SW reset\n", __func__);
+ result = -EAGAIN;
+ goto b2r2_core_init_hw_timeout;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debug fs files for register access */
+ if (core->debugfs_core_root_dir && !core->debugfs_regs_dir) {
+ int i;
+ core->debugfs_regs_dir = debugfs_create_dir("regs",
+ core->debugfs_core_root_dir);
+ debugfs_create_file("all", 0666, core->debugfs_regs_dir,
+ (void *)core->hw, &debugfs_b2r2_regs_fops);
+ /* Create debugfs entries for all static registers */
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++)
+ debugfs_create_file(debugfs_regs[i].name, 0666,
+ core->debugfs_regs_dir,
+ (void *)(((u8 *) core->hw) +
+ debugfs_regs[i].offset),
+ &debugfs_b2r2_reg_fops);
+ }
+#endif
+
+ b2r2_log_info(core->dev, "%s ended..\n", __func__);
+ return result;
+
+/* Recover from any error without any leaks */
+b2r2_core_init_hw_timeout:
+	/* Free B2R2 interrupt handler */
+ free_irq(core->irq, core);
+
+b2r2_init_request_irq_failed:
+ if (core->hw)
+ iounmap(core->hw);
+ core->hw = NULL;
+
+ return result;
+}
+
+
+/**
+ * exit_hw() - B2R2 Hardware exit
+ *
+ * core->lock _must_ NOT be held
+ */
+static void exit_hw(struct b2r2_core *core)
+{
+ unsigned long flags;
+
+ b2r2_log_info(core->dev, "%s started..\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Unregister our debugfs entries */
+ if (core->debugfs_regs_dir) {
+ debugfs_remove_recursive(core->debugfs_regs_dir);
+ core->debugfs_regs_dir = NULL;
+ }
+#endif
+ b2r2_log_debug(core->dev, "%s: locking core->lock\n", __func__);
+ spin_lock_irqsave(&core->lock, flags);
+
+ /* Cancel all pending jobs */
+ b2r2_log_debug(core->dev, "%s: canceling pending jobs\n", __func__);
+ exit_job_list(core, &core->prio_queue);
+
+	/*
+	 * Soft reset B2R2: close all DMA, reset all state to idle,
+	 * reset registers
+	 */
+ b2r2_log_debug(core->dev, "%s: putting b2r2 in reset\n", __func__);
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+
+ b2r2_log_debug(core->dev, "%s: clearing interrupts\n", __func__);
+ clear_interrupts(core);
+
+	b2r2_log_debug(core->dev, "%s: unlocking core->lock\n", __func__);
+	spin_unlock_irqrestore(&core->lock, flags);
+
+	/*
+	 * Free the B2R2 interrupt handler outside of the spinlock:
+	 * free_irq() may sleep while waiting for running handlers.
+	 */
+	b2r2_log_debug(core->dev, "%s: freeing interrupt handler\n", __func__);
+	free_irq(core->irq, core);
+
+ b2r2_log_info(core->dev, "%s ended...\n", __func__);
+}
+
+/**
+ * b2r2_probe() - This routine loads the B2R2 core driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct b2r2_core *core;
+ struct b2r2_control *control;
+
+ BUG_ON(pdev == NULL);
+ BUG_ON(pdev->id < 0 || pdev->id >= B2R2_MAX_NBR_DEVICES);
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core) {
+ dev_err(&pdev->dev, "b2r2 core alloc failed\n");
+		ret = -ENOMEM;
+ goto b2r2_probe_core_alloc_fail;
+ }
+
+ core->dev = &pdev->dev;
+ dev_set_drvdata(core->dev, core);
+ if (pdev->id)
+ snprintf(core->name, sizeof(core->name), "b2r2_%d", pdev->id);
+ else
+ snprintf(core->name, sizeof(core->name), "b2r2");
+
+ dev_info(&pdev->dev, "init started.\n");
+
+ /* Init spin locks */
+ spin_lock_init(&core->lock);
+
+ /* Init job queues */
+ INIT_LIST_HEAD(&core->prio_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ /* Create work queue for callbacks & timeout */
+ INIT_DELAYED_WORK(&core->timeout_work, timeout_work_function);
+#endif
+
+ /* Work queue for callbacks and timeout management */
+ core->work_queue = create_workqueue("B2R2");
+ if (!core->work_queue) {
+ ret = -ENOMEM;
+ goto b2r2_probe_no_work_queue;
+ }
+
+ /* Get the clock for B2R2 */
+ core->b2r2_clock = clk_get(core->dev, "b2r2");
+ if (IS_ERR(core->b2r2_clock)) {
+ ret = PTR_ERR(core->b2r2_clock);
+ dev_err(&pdev->dev, "clk_get b2r2 failed\n");
+ goto b2r2_probe_no_clk;
+ }
+
+ /* Get the B2R2 regulator */
+ core->b2r2_reg = regulator_get(core->dev, "vsupply");
+ if (IS_ERR(core->b2r2_reg)) {
+ ret = PTR_ERR(core->b2r2_reg);
+ dev_err(&pdev->dev, "regulator_get vsupply failed "
+ "(dev_name=%s)\n", dev_name(core->dev));
+ goto b2r2_probe_no_reg;
+ }
+
+ /* Init power management */
+ mutex_init(&core->domain_lock);
+ INIT_DELAYED_WORK_DEFERRABLE(&core->domain_disable_work,
+ domain_disable_work_function);
+ core->domain_enabled = false;
+
+ /* Map B2R2 into kernel virtual memory space */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "%s: platform_get_resource failed\n",
+				__func__);
+		ret = -EINVAL;
+		goto b2r2_probe_no_res;
+	}
+
+	/* Hook up irq */
+	core->irq = platform_get_irq(pdev, 0);
+	if (core->irq <= 0) {
+		dev_err(&pdev->dev, "%s: Failed to request irq (irq=%d)\n",
+				__func__, core->irq);
+		ret = -EINVAL;
+		goto b2r2_failed_irq_get;
+	}
+
+	core->hw = (struct b2r2_memory_map *) ioremap(res->start,
+			resource_size(res));
+ if (core->hw == NULL) {
+ dev_err(&pdev->dev, "%s: ioremap failed\n", __func__);
+ ret = -ENOMEM;
+ goto b2r2_probe_ioremap_failed;
+ }
+
+ dev_dbg(core->dev, "b2r2 structure address %p\n", core->hw);
+
+ control = kzalloc(sizeof(*control), GFP_KERNEL);
+ if (!control) {
+ dev_err(&pdev->dev, "b2r2 control alloc failed\n");
+		ret = -ENOMEM;
+ goto b2r2_probe_control_alloc_fail;
+ }
+
+ control->miscdev.parent = core->dev;
+ control->data = (void *)core;
+ control->id = pdev->id;
+ control->dev = &pdev->dev; /* Temporary device */
+ snprintf(control->name, sizeof(control->name), "%s_blt", core->name);
+
+ core->op_size = B2R2_PLUG_OPCODE_SIZE_DEFAULT;
+ core->ch_size = B2R2_PLUG_CHUNK_SIZE_DEFAULT;
+ core->pg_size = B2R2_PLUG_PAGE_SIZE_DEFAULT;
+ core->mg_size = B2R2_PLUG_MESSAGE_SIZE_DEFAULT;
+ core->min_req_time = 0;
+
+#ifdef CONFIG_DEBUG_FS
+ core->debugfs_root_dir = debugfs_create_dir(core->name, NULL);
+ core->debugfs_core_root_dir = debugfs_create_dir("core",
+ core->debugfs_root_dir);
+ debugfs_create_file("stats", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_stat_fops);
+ debugfs_create_file("clock", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_clock_fops);
+ debugfs_create_u8("op_size", 0666, core->debugfs_core_root_dir,
+ &core->op_size);
+ debugfs_create_u8("ch_size", 0666, core->debugfs_core_root_dir,
+ &core->ch_size);
+ debugfs_create_u8("pg_size", 0666, core->debugfs_core_root_dir,
+ &core->pg_size);
+ debugfs_create_u8("mg_size", 0666, core->debugfs_core_root_dir,
+ &core->mg_size);
+ debugfs_create_u16("min_req_time", 0666, core->debugfs_core_root_dir,
+ &core->min_req_time);
+
+ control->debugfs_debug_root_dir = debugfs_create_dir("debug",
+ core->debugfs_root_dir);
+ control->mem_heap.debugfs_root_dir = debugfs_create_dir("mem",
+ core->debugfs_root_dir);
+ control->debugfs_root_dir = debugfs_create_dir("blt",
+ core->debugfs_root_dir);
+#endif
+
+ ret = b2r2_debug_init(control);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "b2r2_debug_init failed\n");
+ goto b2r2_probe_debug_init_failed;
+ }
+
+	/*
+	 * Initialize the b2r2_blt module. FIXME: should this be a module
+	 * of its own, or perhaps a dedicated module init C file?
+	 */
+ ret = b2r2_blt_module_init(control);
+ if (ret < 0) {
+ b2r2_log_err(&pdev->dev, "b2r2_blt_module_init failed\n");
+ goto b2r2_probe_blt_init_fail;
+ }
+
+ core->control = control;
+ b2r2_core[pdev->id] = core;
+ dev_info(&pdev->dev, "init done.\n");
+
+ return ret;
+
+/* Error handling: unwind in reverse order of acquisition */
+b2r2_probe_blt_init_fail:
+	b2r2_debug_exit();
+b2r2_probe_debug_init_failed:
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(core->debugfs_root_dir);
+#endif
+	kfree(control);
+b2r2_probe_control_alloc_fail:
+	iounmap(core->hw);
+b2r2_probe_ioremap_failed:
+b2r2_failed_irq_get:
+b2r2_probe_no_res:
+	regulator_put(core->b2r2_reg);
+b2r2_probe_no_reg:
+	clk_put(core->b2r2_clock);
+b2r2_probe_no_clk:
+	destroy_workqueue(core->work_queue);
+	core->work_queue = NULL;
+b2r2_probe_no_work_queue:
+	kfree(core);
+b2r2_probe_core_alloc_fail:
+	dev_err(&pdev->dev, "init failed.\n");
+
+	return ret;
+}
+
+
+
+/**
+ * b2r2_remove - This routine unloads b2r2 driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_remove(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(&pdev->dev, "%s: Started\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(core->debugfs_root_dir);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(core->work_queue);
+
+ /* Exit b2r2 blt module */
+ b2r2_blt_module_exit(core->control);
+
+ kfree(core->control);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&core->timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(core->work_queue);
+
+ /* Make sure the power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+	/* Unmap B2R2 registers */
+ b2r2_log_info(&pdev->dev, "unmap b2r2 registers..\n");
+ if (core->hw) {
+ iounmap(core->hw);
+ core->hw = NULL;
+ }
+
+ destroy_workqueue(core->work_queue);
+
+ spin_lock_irqsave(&core->lock, flags);
+ core->work_queue = NULL;
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Return the clock */
+ clk_put(core->b2r2_clock);
+ regulator_put(core->b2r2_reg);
+
+ core->dev = NULL;
+ kfree(core);
+ b2r2_core[pdev->id] = NULL;
+
+ b2r2_debug_exit();
+
+ b2r2_log_info(&pdev->dev, "%s: Ended\n", __func__);
+
+ return 0;
+}
+
+/**
+ * b2r2_suspend() - Puts the B2R2 device into suspend state
+ * @pdev: platform device.
+ * @state: target power state.
+ *
+ * Flushes all pending jobs and makes sure the power to the B2R2
+ * hardware is turned off.
+ */
+int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(core->work_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&core->timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(core->work_queue);
+
+ /* Make sure power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ return 0;
+}
+
+
+/**
+ * b2r2_resume() - Resumes the B2R2 device from suspend state
+ * @pdev: platform device.
+ *
+ * Restores the B2R2 device state on resume.
+ */
+int b2r2_resume(struct platform_device *pdev)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+/**
+ * struct platform_b2r2_driver - Platform driver configuration for the
+ * B2R2 core driver
+ */
+static struct platform_driver platform_b2r2_driver = {
+ .remove = b2r2_remove,
+ .driver = {
+ .name = "b2r2",
+ },
+	/* TODO: implement proper power management functions */
+ .suspend = b2r2_suspend,
+ .resume = b2r2_resume,
+};
+
+
+/**
+ * b2r2_init() - Module init function for the B2R2 core module
+ */
+static int __init b2r2_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return platform_driver_probe(&platform_b2r2_driver, b2r2_probe);
+}
+module_init(b2r2_init);
+
+/**
+ * b2r2_exit() - Module exit function for the B2R2 core module
+ */
+static void __exit b2r2_exit(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ platform_driver_unregister(&platform_b2r2_driver);
+}
+module_exit(b2r2_exit);
+
+
+/* Module license */
+MODULE_LICENSE("GPL");
+
+/* Module author & description */
+MODULE_AUTHOR("Robert Fekete (robert.fekete@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Core driver");
diff --git a/drivers/video/b2r2/b2r2_core.h b/drivers/video/b2r2/b2r2_core.h
new file mode 100644
index 00000000000..991dd9d9d1b
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_CORE_H__
+#define __B2R2_CORE_H__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+
+/**
+ * b2r2_core_job_add() - Adds a job to B2R2 job queues
+ *
+ * The job reference count will be increased after this function
+ * has been called and b2r2_core_job_release() must be called to
+ * release the reference. The job callback function will be always
+ * be called after the job is done or cancelled.
+ *
+ * @control: The b2r2 control entity
+ * @job: Job to be added
+ *
+ * Returns 0 if OK else negative error code
+ *
+ */
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job);
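+
+/*
+ * Example (illustrative): a synchronous caller pairs the reference taken
+ * by b2r2_core_job_add() with b2r2_core_job_release() once the job has
+ * been waited for:
+ *
+ *	ret = b2r2_core_job_add(control, job);
+ *	if (ret >= 0) {
+ *		ret = b2r2_core_job_wait(job);
+ *		b2r2_core_job_release(job, __func__);
+ *	}
+ */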
+
+/**
+ * b2r2_core_job_wait() - Waits for an added job to be done.
+ *
+ * @job: Job to wait for
+ *
+ * Returns 0 if job done else negative error code
+ *
+ */
+int b2r2_core_job_wait(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_cancel() - Cancel an already added job.
+ *
+ * @job: Job to cancel
+ *
+ * Returns 0 if job cancelled or done else negative error code
+ *
+ */
+int b2r2_core_job_cancel(struct b2r2_core_job *job);
+
+/**
+ * b2r2_core_job_find() - Finds job with given job id
+ *
+ * Reference count will be increased for the found job
+ *
+ * @control: The b2r2 control entity
+ * @job_id: Job id to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id);
+
+/**
+ * b2r2_core_job_find_first_with_tag() - Finds first job with given tag
+ *
+ * Reference count will be increased for the found job.
+ * This function can be used to find all jobs for a client, i.e.
+ * when cancelling all jobs for a client.
+ *
+ * @control: The b2r2 control entity
+ * @tag: Tag to find
+ *
+ * Returns job if found, else NULL
+ *
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag);
+
+/**
+ * b2r2_core_job_addref() - Increase the job reference count.
+ *
+ * @job: Job to increase reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller);
+
+/**
+ * b2r2_core_job_release() - Decrease the job reference count. The
+ * job will be released (the release() function
+ * will be called) when the reference count
+ * reaches zero.
+ *
+ * @job: Job to decrease reference count for.
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller);
+
+#endif /* !defined(__B2R2_CORE_H__) */
diff --git a/drivers/video/b2r2/b2r2_debug.c b/drivers/video/b2r2/b2r2_debug.c
new file mode 100644
index 00000000000..23a0b1aa9ac
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+static struct dentry *log_lvl_dir;
+static int module_initialized;
+
+#define CHARS_IN_NODE_DUMP 1544
+#define DUMPED_NODE_SIZE (CHARS_IN_NODE_DUMP + 1)
+
+static void dump_node(char *dst, struct b2r2_node *node)
+{
+ dst += sprintf(dst, "node 0x%08x ------------------\n",
+ (unsigned int)node);
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_NIP:", node->node.GROUP0.B2R2_NIP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CIC:", node->node.GROUP0.B2R2_CIC);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_INS:", node->node.GROUP0.B2R2_INS);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_ACK:", node->node.GROUP0.B2R2_ACK);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TBA:", node->node.GROUP1.B2R2_TBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TTY:", node->node.GROUP1.B2R2_TTY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TXY:", node->node.GROUP1.B2R2_TXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TSZ:", node->node.GROUP1.B2R2_TSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1CF:", node->node.GROUP2.B2R2_S1CF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2CF:", node->node.GROUP2.B2R2_S2CF);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1BA:", node->node.GROUP3.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1TY:", node->node.GROUP3.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1XY:", node->node.GROUP3.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S1SZ:", node->node.GROUP3.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2BA:", node->node.GROUP4.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2TY:", node->node.GROUP4.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2XY:", node->node.GROUP4.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S2SZ:", node->node.GROUP4.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3BA:", node->node.GROUP5.B2R2_SBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3TY:", node->node.GROUP5.B2R2_STY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3XY:", node->node.GROUP5.B2R2_SXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_S3SZ:", node->node.GROUP5.B2R2_SSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWO:", node->node.GROUP6.B2R2_CWO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CWS:", node->node.GROUP6.B2R2_CWS);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CCO:", node->node.GROUP7.B2R2_CCO);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CML:", node->node.GROUP7.B2R2_CML);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_PMK:", node->node.GROUP8.B2R2_PMK);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FCTL:", node->node.GROUP8.B2R2_FCTL);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RSF:", node->node.GROUP9.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_RZI:", node->node.GROUP9.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_HFP:", node->node.GROUP9.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_VFP:", node->node.GROUP9.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RSF:", node->node.GROUP10.B2R2_RSF);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_RZI:", node->node.GROUP10.B2R2_RZI);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_HFP:", node->node.GROUP10.B2R2_HFP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_Y_VFP:", node->node.GROUP10.B2R2_VFP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF0:", node->node.GROUP11.B2R2_FF0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF1:", node->node.GROUP11.B2R2_FF1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF2:", node->node.GROUP11.B2R2_FF2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_FF3:", node->node.GROUP11.B2R2_FF3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY1:", node->node.GROUP12.B2R2_KEY1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_KEY2:", node->node.GROUP12.B2R2_KEY2);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYL:", node->node.GROUP13.B2R2_XYL);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_XYP:", node->node.GROUP13.B2R2_XYP);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_SAR:", node->node.GROUP14.B2R2_SAR);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_USR:", node->node.GROUP14.B2R2_USR);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX0:", node->node.GROUP15.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX1:", node->node.GROUP15.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX2:", node->node.GROUP15.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_IVMX3:", node->node.GROUP15.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX0:", node->node.GROUP16.B2R2_VMX0);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX1:", node->node.GROUP16.B2R2_VMX1);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX2:", node->node.GROUP16.B2R2_VMX2);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_OVMX3:", node->node.GROUP16.B2R2_VMX3);
+ dst += sprintf(dst, "--\n");
+
+}
+
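+/**
+ * b2r2_debug_job_done() - Stores a copy of a finished job's node list
+ *
+ * @cont: The b2r2 control entity
+ * @first_node: First node of the finished job
+ *
+ * The copy replaces any previously saved job and is what the "last_job"
+ * debugfs file dumps on read.
+ */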
+void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ struct b2r2_node **dst_node;
+
+ mutex_lock(&cont->last_job_lock);
+
+ if (cont->last_job) {
+ node = cont->last_job;
+ while (node != NULL) {
+ struct b2r2_node *tmp = node->next;
+ kfree(node);
+ node = tmp;
+ }
+ cont->last_job = NULL;
+ }
+
+ node = first_node;
+ dst_node = &cont->last_job;
+ while (node != NULL) {
+ *dst_node = kzalloc(sizeof(**dst_node), GFP_KERNEL);
+ if (!(*dst_node))
+ goto last_job_alloc_failed;
+
+ memcpy(*dst_node, node, sizeof(**dst_node));
+
+ dst_node = &((*dst_node)->next);
+ node = node->next;
+ }
+
+ mutex_unlock(&cont->last_job_lock);
+
+ return;
+
+last_job_alloc_failed:
+ mutex_unlock(&cont->last_job_lock);
+
+ while (cont->last_job != NULL) {
+ struct b2r2_node *tmp = cont->last_job->next;
+ kfree(cont->last_job);
+ cont->last_job = tmp;
+ }
+
+ return;
+}
+
+static ssize_t last_job_read(struct file *filp, char __user *buf,
+ size_t bytes, loff_t *off)
+{
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+ struct b2r2_node *node = cont->last_job;
+ int node_count = 0;
+ int i;
+
+ size_t size;
+ size_t count;
+ loff_t offs = *off;
+
+ for (; node != NULL; node = node->next)
+ node_count++;
+
+ size = node_count * DUMPED_NODE_SIZE;
+
+ if (node_count != cont->prev_node_count) {
+ kfree(cont->last_job_chars);
+
+		cont->last_job_chars = kzalloc(size, GFP_KERNEL);
+		if (!cont->last_job_chars) {
+			cont->prev_node_count = 0;
+			return -ENOMEM;
+		}
+		cont->prev_node_count = node_count;
+ }
+
+ mutex_lock(&cont->last_job_lock);
+ node = cont->last_job;
+ for (i = 0; i < node_count; i++) {
+ BUG_ON(node == NULL);
+		dump_node(cont->last_job_chars + i * DUMPED_NODE_SIZE,
+				node);
+ node = node->next;
+ }
+ mutex_unlock(&cont->last_job_lock);
+
+ if (offs > size)
+ return 0;
+
+ if (offs + bytes > size)
+ count = size - offs;
+ else
+ count = bytes;
+
+ if (copy_to_user(buf, cont->last_job_chars + offs, count))
+ return -EFAULT;
+
+ *off = offs + count;
+ return count;
+}
+
+static const struct file_operations last_job_fops = {
+ .read = last_job_read,
+};
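+
+/*
+ * With debugfs mounted at the usual location, the dump of the most
+ * recently finished job can be read from
+ * /sys/kernel/debug/b2r2/debug/last_job (one node per
+ * CHARS_IN_NODE_DUMP-sized block).
+ */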
+
+int b2r2_debug_init(struct b2r2_control *cont)
+{
+ int i;
+
+	if (!module_initialized) {
+ for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++)
+ b2r2_log_levels[i] = 0;
+
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ /*
+ * If dynamic debug is disabled we need some other way to
+ * control the log prints
+ */
+ log_lvl_dir = debugfs_create_dir("b2r2_log", NULL);
+
+ /* No need to save the files,
+ * they will be removed recursively */
+ (void)debugfs_create_bool("warnings", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]);
+ (void)debugfs_create_bool("info", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]);
+ (void)debugfs_create_bool("debug", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]);
+ (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]);
+
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+ /* log_lvl_dir is never used */
+ (void)log_lvl_dir;
+#endif
+		module_initialized++;
+ }
+
+ if (cont->debugfs_debug_root_dir) {
+ /* No need to save the file,
+ * it will be removed recursively */
+ (void)debugfs_create_file("last_job", 0444,
+ cont->debugfs_debug_root_dir, cont,
+ &last_job_fops);
+ }
+
+ mutex_init(&cont->last_job_lock);
+
+ return 0;
+}
+
+void b2r2_debug_exit(void)
+{
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+	module_initialized--;
+	if (!module_initialized && log_lvl_dir) {
+ debugfs_remove_recursive(log_lvl_dir);
+ log_lvl_dir = NULL;
+ }
+#endif
+}
diff --git a/drivers/video/b2r2/b2r2_debug.h b/drivers/video/b2r2/b2r2_debug.h
new file mode 100644
index 00000000000..1b1ac83f6cb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+
+#include <linux/device.h>
+
+#include "b2r2_internal.h"
+
+#ifdef CONFIG_B2R2_DEBUG
+
+/* Log macros */
+enum b2r2_log_levels {
+ B2R2_LOG_LEVEL_WARN,
+ B2R2_LOG_LEVEL_INFO,
+ B2R2_LOG_LEVEL_DEBUG,
+ B2R2_LOG_LEVEL_REGDUMP,
+ B2R2_LOG_LEVEL_COUNT,
+};
+
+/*
+ * Booleans controlling the different log levels. The different log levels are
+ * enabled separately (i.e. you can have info prints without the warn prints).
+ */
+extern int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+
+#define b2r2_log_err(b2r2_log_dev, ...) do { \
+ dev_err(b2r2_log_dev, __VA_ARGS__); \
+ } while (0)
+
+/* If dynamic debug is enabled it should be used instead of loglevels */
+#ifdef CONFIG_DYNAMIC_DEBUG
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
+ dev_dbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#else
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_WARN]) \
+ dev_warn(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_INFO]) \
+ dev_info(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]) \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]) \
+ dev_vdbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+#endif
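+
+/*
+ * Example (illustrative): all the log macros take a struct device
+ * pointer followed by a printf-style format string and arguments, e.g.
+ *
+ *	b2r2_log_warn(cont->dev, "%s: unsupported fmt=%#x\n",
+ *			__func__, fmt);
+ */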
+
+int b2r2_debug_init(struct b2r2_control *cont);
+void b2r2_debug_exit(void);
+void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *node);
+
+#else
+
+#define b2r2_log_err(...)
+#define b2r2_log_warn(...)
+#define b2r2_log_info(...)
+#define b2r2_log_debug(...)
+#define b2r2_log_regdump(...)
+
+static inline int b2r2_debug_init(struct b2r2_control *cont)
+{
+ return 0;
+}
+static inline void b2r2_debug_exit(void)
+{
+ return;
+}
+static inline void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *node)
+{
+ return;
+}
+
+#endif
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_filters.c b/drivers/video/b2r2/b2r2_filters.c
new file mode 100644
index 00000000000..a969816a9e7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include "b2r2_filters.h"
+#include "b2r2_internal.h"
+
+/**
+ * struct b2r2_filter_spec filters[] - Filter lookup table
+ *
+ * Lookup table for filters for different scale factors. A filter
+ * will be selected according to "min < scale_factor <= max".
+ */
+static struct b2r2_filter_spec filters[] = {
+ {
+ .min = 1024,
+ .max = 1433,
+ .h_coeffs = {
+ 0xfc, 0x06, 0xf9, 0x09, 0x34, 0x09, 0xf9, 0x06,
+ 0xfd, 0x07, 0xf7, 0x10, 0x32, 0x02, 0xfc, 0x05,
+ 0xfe, 0x07, 0xf6, 0x17, 0x2f, 0xfc, 0xff, 0x04,
+ 0xff, 0x06, 0xf5, 0x20, 0x2a, 0xf9, 0x01, 0x02,
+ 0x00, 0x04, 0xf6, 0x27, 0x25, 0xf6, 0x04, 0x00,
+ 0x02, 0x01, 0xf9, 0x2d, 0x1d, 0xf5, 0x06, 0xff,
+ 0x04, 0xff, 0xfd, 0x31, 0x15, 0xf5, 0x07, 0xfe,
+ 0x05, 0xfc, 0x02, 0x35, 0x0d, 0xf7, 0x07, 0xfd
+ },
+ .v_coeffs = {
+ 0xf8, 0x0a, 0x3c, 0x0a, 0xf8,
+ 0xf6, 0x12, 0x3b, 0x02, 0xfb,
+ 0xf4, 0x1b, 0x35, 0xfd, 0xff,
+ 0xf4, 0x23, 0x30, 0xf8, 0x01,
+ 0xf6, 0x29, 0x27, 0xf6, 0x04,
+ 0xf9, 0x2e, 0x1e, 0xf5, 0x06,
+ 0xfd, 0x31, 0x16, 0xf6, 0x06,
+ 0x02, 0x32, 0x0d, 0xf8, 0x07
+ },
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .h_coeffs = {
+ 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
+ 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
+ 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
+ 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
+ 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
+ 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
+ 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
+ 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
+ },
+ .v_coeffs = {
+ 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
+ 0xf5, 0x15, 0x38, 0x06, 0xf8,
+ 0xf5, 0x1d, 0x33, 0x00, 0xfb,
+ 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
+ 0xf9, 0x28, 0x26, 0xf9, 0x00,
+ 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
+ 0x00, 0x2e, 0x18, 0xf6, 0x04,
+ 0x05, 0x2e, 0x11, 0xf7, 0x05
+ },
+ },
+ {
+ .min = 1536,
+ .max = 3072,
+ .h_coeffs = {
+ 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
+ 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
+ 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
+ 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
+ 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
+ 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
+ 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
+ 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
+ },
+ .v_coeffs = {
+ 0x05, 0x10, 0x16, 0x10, 0x05,
+ 0x06, 0x11, 0x16, 0x0f, 0x04,
+ 0x08, 0x13, 0x15, 0x0e, 0x02,
+ 0x09, 0x14, 0x16, 0x0c, 0x01,
+ 0x0b, 0x15, 0x15, 0x0b, 0x00,
+ 0x0d, 0x16, 0x13, 0x0a, 0x00,
+ 0x0f, 0x17, 0x13, 0x08, 0xff,
+ 0x11, 0x18, 0x12, 0x07, 0xfe
+ },
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .h_coeffs = {
+ 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
+ 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
+ 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
+ 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
+ 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
+ 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
+ 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
+ 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
+ },
+ .v_coeffs = {
+ 0x09, 0x0f, 0x10, 0x0f, 0x09,
+ 0x09, 0x0f, 0x12, 0x0e, 0x08,
+ 0x0a, 0x10, 0x11, 0x0e, 0x07,
+ 0x0b, 0x11, 0x11, 0x0d, 0x06,
+ 0x0c, 0x11, 0x12, 0x0c, 0x05,
+ 0x0d, 0x12, 0x11, 0x0c, 0x04,
+ 0x0e, 0x12, 0x11, 0x0b, 0x04,
+ 0x0f, 0x13, 0x11, 0x0a, 0x03
+ },
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .h_coeffs = {
+ 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
+ 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
+ 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
+ 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
+ 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
+ 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
+ 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
+ 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
+ },
+ .v_coeffs = {
+ 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
+ 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
+ 0x0b, 0x0f, 0x10, 0x0d, 0x09,
+ 0x0c, 0x0f, 0x10, 0x0d, 0x08,
+ 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
+ 0x0d, 0x10, 0x10, 0x0c, 0x07,
+ 0x0e, 0x10, 0x0f, 0x0c, 0x07,
+ 0x0f, 0x10, 0x10, 0x0b, 0x06
+ },
+ },
+};
+static const size_t filters_size = ARRAY_SIZE(filters);
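+
+/*
+ * Scale factors are expressed in 6.10 fixed point, so 1 << 10 (1024)
+ * corresponds to 1:1 scaling. A downscale by a factor of two, for
+ * example, gives scale_factor = 2048 and selects the {min = 1536,
+ * max = 3072} entry above, while anything below 1024 is an upscale and
+ * is handled by the bilinear filter (see b2r2_filter_find()).
+ */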
+
+/**
+ * struct b2r2_filter_spec bilinear_filter - A bilinear filter
+ *
+ * The bilinear filter will be used if no custom filters are specified, or
+ * for upscales not matching any filter in the lookup table.
+ */
+static struct b2r2_filter_spec bilinear_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x08, 0x3e, 0xfb, 0x00, 0x00,
+ 0x00, 0x00, 0xfb, 0x13, 0x3b, 0xf7, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0x1f, 0x34, 0xf5, 0x00, 0x00,
+ 0x00, 0x00, 0xf6, 0x2b, 0x2a, 0xf5, 0x00, 0x00,
+ 0x00, 0x00, 0xf6, 0x35, 0x1e, 0xf7, 0x00, 0x00,
+ 0x00, 0x00, 0xf9, 0x3c, 0x12, 0xf9, 0x00, 0x00,
+ 0x00, 0x00, 0xfd, 0x3f, 0x07, 0xfd, 0x00, 0x00
+ },
+ .v_coeffs = {
+ 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0x00, 0x09, 0x3d, 0xfa, 0x00,
+ 0x00, 0x13, 0x39, 0xf4, 0x00,
+ 0x00, 0x1e, 0x31, 0xf1, 0x00,
+ 0x00, 0x27, 0x28, 0xf1, 0x00,
+ 0x00, 0x31, 0x1d, 0xf2, 0x00,
+ 0x00, 0x38, 0x12, 0xf6, 0x00,
+ 0x00, 0x3d, 0x07, 0xfc, 0x00
+ },
+};
+
+/**
+ * struct b2r2_filter_spec default_downscale_filter - Default filter for downscale
+ *
+ * The default downscale filter will be used for downscales not matching any
+ * filter in the lookup table.
+ */
+static struct b2r2_filter_spec default_downscale_filter = {
+ .min = 1 << 10,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
+ 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
+ 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
+ },
+ .v_coeffs = {
+ 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
+ 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
+ 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0f, 0x0c, 0x08
+ },
+};
+
+/**
+ * struct b2r2_filter_spec blur_filter - Blur filter
+ *
+ * Filter for blurring an image.
+ */
+static struct b2r2_filter_spec blur_filter = {
+ .min = 0,
+ .max = 0xffff,
+ .h_coeffs = {
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08
+ },
+ .v_coeffs = {
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x10, 0x0c, 0x0c
+ },
+};
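+
+/*
+ * Note that every row of coefficients above sums to 0x40 (64), which is
+ * the unity-gain normalization of the filter tables; the blur filter is
+ * thus a plain box average (eight horizontal taps of 8/64, and
+ * 12 + 12 + 16 + 12 + 12 = 64 vertically).
+ */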
+
+/* Private function declarations */
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
+
+/* Public functions */
+
+int b2r2_filters_init(struct b2r2_control *cont)
+{
+ int i;
+
+ if (cont->filters_initialized)
+ return 0;
+
+	for (i = 0; i < filters_size; i++)
+		alloc_filter_coeffs(cont->dev, &filters[i]);
+
+ alloc_filter_coeffs(cont->dev, &bilinear_filter);
+ alloc_filter_coeffs(cont->dev, &default_downscale_filter);
+ alloc_filter_coeffs(cont->dev, &blur_filter);
+
+ cont->filters_initialized = 1;
+
+ return 0;
+}
+
+void b2r2_filters_exit(struct b2r2_control *cont)
+{
+ int i;
+
+ if (!cont->filters_initialized)
+ return;
+
+	for (i = 0; i < filters_size; i++)
+		free_filter_coeffs(cont->dev, &filters[i]);
+
+ free_filter_coeffs(cont->dev, &bilinear_filter);
+ free_filter_coeffs(cont->dev, &default_downscale_filter);
+ free_filter_coeffs(cont->dev, &blur_filter);
+
+ cont->filters_initialized = 0;
+}
+
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor)
+{
+ int i;
+ struct b2r2_filter_spec *filter = NULL;
+
+ for (i = 0; i < filters_size; i++) {
+ if ((filters[i].min < scale_factor) &&
+ (scale_factor <= filters[i].max) &&
+ filters[i].h_coeffs_dma_addr &&
+ filters[i].v_coeffs_dma_addr) {
+ filter = &filters[i];
+ break;
+ }
+ }
+
+ if (filter == NULL) {
+ /*
+ * No suitable filter has been found. Use default filters,
+ * bilinear for any upscale.
+ */
+ if (scale_factor < (1 << 10))
+ filter = &bilinear_filter;
+ else
+ filter = &default_downscale_filter;
+ }
+
+ /*
+	 * Check that the coefficients were successfully allocated for this
+ * filter.
+ */
+ if (!filter->h_coeffs_dma_addr || !filter->v_coeffs_dma_addr)
+ return NULL;
+ else
+ return filter;
+}
+
+struct b2r2_filter_spec *b2r2_filter_blur(void)
+{
+ return &blur_filter;
+}
+
+/* Private functions */
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
+{
+ int ret;
+
+ filter->h_coeffs_dma_addr = dma_alloc_coherent(dev,
+ B2R2_HF_TABLE_SIZE, &(filter->h_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->h_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ filter->v_coeffs_dma_addr = dma_alloc_coherent(dev,
+ B2R2_VF_TABLE_SIZE, &(filter->v_coeffs_phys_addr),
+ GFP_DMA | GFP_KERNEL);
+ if (filter->v_coeffs_dma_addr == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(filter->h_coeffs_dma_addr, filter->h_coeffs,
+ B2R2_HF_TABLE_SIZE);
+ memcpy(filter->v_coeffs_dma_addr, filter->v_coeffs,
+ B2R2_VF_TABLE_SIZE);
+
+ return 0;
+
+error:
+ free_filter_coeffs(dev, filter);
+ return ret;
+
+}
+
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
+{
+ if (filter->h_coeffs_dma_addr != NULL)
+ dma_free_coherent(dev, B2R2_HF_TABLE_SIZE,
+ filter->h_coeffs_dma_addr,
+ filter->h_coeffs_phys_addr);
+ if (filter->v_coeffs_dma_addr != NULL)
+ dma_free_coherent(dev, B2R2_VF_TABLE_SIZE,
+ filter->v_coeffs_dma_addr,
+ filter->v_coeffs_phys_addr);
+
+ filter->h_coeffs_dma_addr = NULL;
+ filter->h_coeffs_phys_addr = 0;
+ filter->v_coeffs_dma_addr = NULL;
+ filter->v_coeffs_phys_addr = 0;
+}
diff --git a/drivers/video/b2r2/b2r2_filters.h b/drivers/video/b2r2/b2r2_filters.h
new file mode 100644
index 00000000000..790c9ec8ee9
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_filters.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 filters.
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_FILTERS_H
+#define _LINUX_VIDEO_B2R2_FILTERS_H
+
+#include <linux/kernel.h>
+
+#include "b2r2_internal.h"
+
+#define B2R2_HF_TABLE_SIZE 64
+#define B2R2_VF_TABLE_SIZE 40
+
+/**
+ * struct b2r2_filter_spec - Filter specification structure
+ *
+ * @min: Minimum scale factor for this filter (in 6.10 fixed point)
+ * @max: Maximum scale factor for this filter (in 6.10 fixed point)
+ * @h_coeffs: Horizontal filter coefficients
+ * @v_coeffs: Vertical filter coefficients
+ * @h_coeffs_dma_addr: Virtual DMA address of the horizontal coefficients
+ * @v_coeffs_dma_addr: Virtual DMA address of the vertical coefficients
+ * @h_coeffs_phys_addr: Physical address of the horizontal coefficients
+ * @v_coeffs_phys_addr: Physical address of the vertical coefficients
+ */
+struct b2r2_filter_spec {
+ const u16 min;
+ const u16 max;
+
+ const u8 h_coeffs[B2R2_HF_TABLE_SIZE];
+ const u8 v_coeffs[B2R2_VF_TABLE_SIZE];
+
+ void *h_coeffs_dma_addr;
+ u32 h_coeffs_phys_addr;
+
+ void *v_coeffs_dma_addr;
+ u32 v_coeffs_phys_addr;
+};
+
+/**
+ * b2r2_filters_init() - Initializes the B2R2 filters
+ */
+int b2r2_filters_init(struct b2r2_control *control);
+
+/**
+ * b2r2_filters_exit() - De-initializes the B2R2 filters
+ */
+void b2r2_filters_exit(struct b2r2_control *control);
+
+/**
+ * b2r2_filter_find() - Find a filter matching the given scale factor
+ *
+ * @scale_factor: Scale factor to find a filter for
+ *
+ * Returns NULL if no filter could be found.
+ */
+struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor);
+
+/**
+ * b2r2_filter_blur() - Returns the blur filter
+ *
+ * Returns NULL if no blur filter is available.
+ */
+struct b2r2_filter_spec *b2r2_filter_blur(void);
+
+#endif /* _LINUX_VIDEO_B2R2_FILTERS_H */
diff --git a/drivers/video/b2r2/b2r2_generic.c b/drivers/video/b2r2/b2r2_generic.c
new file mode 100644
index 00000000000..1a27adbaadf
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.c
@@ -0,0 +1,3334 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic path: full coverage of the user interface,
+ * but a non-optimized implementation, used for fallback purposes.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include "b2r2_generic.h"
+#include "b2r2_internal.h"
+#include "b2r2_global.h"
+#include "b2r2_debug.h"
+#include "b2r2_filters.h"
+
+/*
+ * Debug printing
+ */
+#define B2R2_GENERIC_DEBUG_AREAS 0
+#define B2R2_GENERIC_DEBUG
+
+#define B2R2_GENERIC_WORK_BUF_WIDTH 16
+#define B2R2_GENERIC_WORK_BUF_HEIGHT 16
+#define B2R2_GENERIC_WORK_BUF_PITCH (16 * 4)
+#define B2R2_GENERIC_WORK_BUF_FMT B2R2_NATIVE_ARGB8888
+
+/*
+ * Private functions
+ */
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_control *cont,
+ struct b2r2_node *node)
+{
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ while (node != NULL) {
+ memset(&(node->node), 0, sizeof(node->node));
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7fffc;
+
+ if (node->next == NULL)
+ break;
+
+ node->node.GROUP0.B2R2_NIP = node->next->physical_address;
+
+ node = node->next;
+ }
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/**
+ * dump_nodes() - prints the node list
+ */
+static void dump_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first, bool dump_all)
+{
+ struct b2r2_node *node = first;
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+ do {
+ b2r2_log_debug(cont->dev, "\nNODE START:\n=============\n");
+ b2r2_log_debug(cont->dev, "B2R2_ACK: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_ACK);
+ b2r2_log_debug(cont->dev, "B2R2_INS: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_INS);
+ b2r2_log_debug(cont->dev, "B2R2_CIC: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_CIC);
+ b2r2_log_debug(cont->dev, "B2R2_NIP: \t0x%.8x\n",
+ node->node.GROUP0.B2R2_NIP);
+
+ b2r2_log_debug(cont->dev, "B2R2_TSZ: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TSZ);
+ b2r2_log_debug(cont->dev, "B2R2_TXY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TXY);
+ b2r2_log_debug(cont->dev, "B2R2_TTY: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TTY);
+ b2r2_log_debug(cont->dev, "B2R2_TBA: \t0x%.8x\n",
+ node->node.GROUP1.B2R2_TBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S2CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S2CF);
+ b2r2_log_debug(cont->dev, "B2R2_S1CF: \t0x%.8x\n",
+ node->node.GROUP2.B2R2_S1CF);
+
+ b2r2_log_debug(cont->dev, "B2R2_S1SZ: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S1XY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S1TY: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S1BA: \t0x%.8x\n",
+ node->node.GROUP3.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S2SZ: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S2XY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S2TY: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S2BA: \t0x%.8x\n",
+ node->node.GROUP4.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_S3SZ: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SSZ);
+ b2r2_log_debug(cont->dev, "B2R2_S3XY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SXY);
+ b2r2_log_debug(cont->dev, "B2R2_S3TY: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_STY);
+ b2r2_log_debug(cont->dev, "B2R2_S3BA: \t0x%.8x\n",
+ node->node.GROUP5.B2R2_SBA);
+
+ b2r2_log_debug(cont->dev, "B2R2_CWS: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWS);
+ b2r2_log_debug(cont->dev, "B2R2_CWO: \t0x%.8x\n",
+ node->node.GROUP6.B2R2_CWO);
+
+ b2r2_log_debug(cont->dev, "B2R2_FCTL: \t0x%.8x\n",
+ node->node.GROUP8.B2R2_FCTL);
+ b2r2_log_debug(cont->dev, "B2R2_RSF: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RSF);
+ b2r2_log_debug(cont->dev, "B2R2_RZI: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_RZI);
+ b2r2_log_debug(cont->dev, "B2R2_HFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_HFP);
+ b2r2_log_debug(cont->dev, "B2R2_VFP: \t0x%.8x\n",
+ node->node.GROUP9.B2R2_VFP);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RSF: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RSF);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RZI: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_RZI);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_HFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_HFP);
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_VFP: \t0x%.8x\n",
+ node->node.GROUP10.B2R2_VFP);
+
+
+ b2r2_log_debug(cont->dev, "B2R2_IVMX0: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX0);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX1: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX1);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX2: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX2);
+ b2r2_log_debug(cont->dev, "B2R2_IVMX3: \t0x%.8x\n",
+ node->node.GROUP15.B2R2_VMX3);
+ b2r2_log_debug(cont->dev, "\n=============\nNODE END\n");
+
+ node = node->next;
+ } while (node != NULL && dump_all);
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/**
+ * to_native_fmt() - returns the native B2R2 format
+ */
+static inline enum b2r2_native_fmt to_native_fmt(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
+{
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return B2R2_NATIVE_AYCBCR8888;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * get_alpha_range() - returns the alpha range of the given format
+ */
+static inline enum b2r2_ty get_alpha_range(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ break;
+ }
+
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+}
+
+static unsigned int get_pitch(struct b2r2_control *cont,
+ enum b2r2_blt_fmt format, u32 width)
+{
+ switch (format) {
+ case B2R2_BLT_FMT_1_BIT_A1: {
+ int pitch = width >> 3;
+ /* Check for remainder */
+ if (width & 7)
+ pitch++;
+ return pitch;
+ }
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return width;
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* all 16 bits/pixel RGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return width * 2;
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* all 24 bits/pixel raster formats */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return width * 3;
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* all 32 bits/pixel formats */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return width * 4;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* width of the buffer must be a multiple of 4 */
+ if (width & 3) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ return width * 2;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return width;
+	/* all the following formats share the same pitch computation */
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ /* width of the buffer must be a multiple of 2 */
+ if (width & 1) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* width of the buffer must be a multiple of 16. */
+ if (width & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+ /*
+ * return pitch of the Y-buffer.
+ * U and V pitch can be derived from it.
+ */
+ return width;
+ default:
+ b2r2_log_warn(cont->dev, "%s: Unable to determine pitch "
+ "for fmt=%#010x width=%d\n", __func__,
+ format, width);
+ return 0;
+ }
+}
+
+static s32 validate_buf(struct b2r2_control *cont,
+ const struct b2r2_blt_img *image,
+ const struct b2r2_resolved_buf *buf)
+{
+ u32 expect_buf_size;
+ u32 pitch;
+
+	if (image->width <= 0 || image->height <= 0) {
+		b2r2_log_warn(cont->dev, "%s: width=%d or height=%d "
+				"not positive.\n", __func__,
+				image->width, image->height);
+ return -EINVAL;
+ }
+
+	if (image->pitch == 0) {
+		/* autodetect pitch based on format and width */
+		pitch = get_pitch(cont, image->fmt, image->width);
+	} else {
+		pitch = image->pitch;
+	}
+
+	if (pitch == 0) {
+		b2r2_log_warn(cont->dev, "%s: Unable to detect pitch. "
+				"fmt=%#010x, width=%d\n",
+				__func__,
+				image->fmt, image->width);
+		return -EINVAL;
+	}
+
+	expect_buf_size = pitch * image->height;
+
+ /* format specific adjustments */
+ switch (image->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ /*
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size +=
+ (pitch >> 1) * ((image->height + 1) >> 1) * 2;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ expect_buf_size += (pitch >> 1) * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ expect_buf_size += pitch * image->height * 2;
+ break;
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ * Use ceil(height/2) in case buffer height
+ * is not divisible by 2.
+ */
+ expect_buf_size += pitch * ((image->height + 1) >> 1);
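+		/*
+		 * Worked example (illustrative): a 64x48 buffer in
+		 * B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR needs
+		 * 64*48 = 3072 bytes of luma plus 64*24 = 1536 bytes of
+		 * interleaved chroma, i.e. 4608 bytes in total
+		 * (width * height * 3/2).
+		 */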
+ break;
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ /*
+ * include space occupied by U and V data.
+ * U and V interleaved, half resolution, which makes
+ * the UV pitch equal to luma pitch.
+ */
+ expect_buf_size += pitch * image->height;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * (image->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /* Height must be a multiple of 16 for macro-block format.*/
+ if (image->height & 15) {
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
+ "for fmt=%#010x height=%d\n", __func__,
+ image->fmt, image->height);
+ return -EINVAL;
+ }
+ expect_buf_size += pitch * image->height;
+ break;
+ default:
+ break;
+ }
+
+ if (buf->file_len < expect_buf_size) {
+ b2r2_log_warn(cont->dev, "%s: Invalid buffer size:\n"
+ "fmt=%#010x w=%d h=%d buf.len=%d expect_buf_size=%d\n",
+ __func__,
+ image->fmt, image->width, image->height, buf->file_len,
+ expect_buf_size);
+ return -EINVAL;
+ }
+
+ if (image->buf.type == B2R2_BLT_PTR_VIRTUAL) {
+ b2r2_log_warn(cont->dev, "%s: Virtual pointers not supported"
+ " yet.\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Bit-expand the color from fmt to RGB888 with blue at LSB.
+ * Copy MSBs into missing LSBs.
+ */
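+/*
+ * Worked example (illustrative): for RGB565 the 6-bit green component g
+ * expands as (g << 2) | (g >> 4), so 0b101101 becomes 0b10110110 and
+ * pure white 0xffff expands to 0xffffff rather than 0xf8fcf8.
+ */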
+static u32 to_RGB888(struct b2r2_control *cont, u32 color,
+ const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
+
+
+static void setup_fill_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ enum b2r2_native_fmt fill_fmt = 0;
+ u32 src_color = req->user_req.src_color;
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ /* Determine format in src_color */
+ switch (dst_img->fmt) {
+ /* ARGB formats */
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ } else {
+ /* SOURCE_FILL_RAW */
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
+ if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
+ /*
+ * Color is read from a register,
+ * where it is stored in ABGR format.
+ * Set up IVMX.
+ */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_BGR;
+ }
+ }
+ break;
+ /* YUV formats */
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ * Format of the supplied src_color is
+ * B2R2_BLT_FMT_32_BIT_AYUV8888.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+ } else {
+ /* SOURCE_FILL_RAW */
+ bool dst_yuv_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV444_PACKED_PLANAR ==
+ dst_img->fmt;
+
+ bool dst_yuv_semi_planar =
+ B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt ||
+ B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE ==
+ dst_img->fmt;
+
+ if (dst_yuv_planar || dst_yuv_semi_planar) {
+ /*
+ * SOURCE_FILL_RAW cannot be supported
+ * with multi-buffer formats.
+ * Force a legal format to prevent B2R2
+ * from misbehaving.
+ */
+ fill_fmt = B2R2_NATIVE_AYCBCR8888;
+ } else {
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+ /*
+ * Re-arrange the color components from
+ * VUY(A) to (A)YUV
+ */
+ if (dst_img->fmt ==
+ B2R2_BLT_FMT_24_BIT_VUY888) {
+ u32 Y = src_color & 0xff;
+ u32 U = src_color & 0xff00;
+ u32 V = src_color & 0xff0000;
+ src_color = (Y << 16) | U | (V >> 16);
+ } else if (dst_img->fmt ==
+ B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ u32 A = src_color & 0xff;
+ u32 Y = src_color & 0xff00;
+ u32 U = src_color & 0xff0000;
+ u32 V = src_color & 0xff000000;
+ src_color = (A << 24) |
+ (Y << 8) |
+ (U >> 8) |
+ (V >> 24);
+ }
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to
+ * RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ default:
+ /*
+ * Set up IVMX
+ * The destination format is in fact YUV,
+ * but the input stage stores the data in
+ * an intermediate buffer which is RGB.
+ * Hence the conversion from YUV to RGB.
+ */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ }
+ }
+ break;
+ default:
+ src_color = 0;
+ fill_fmt = B2R2_NATIVE_ARGB8888;
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ /* Set color fill on SRC2 channel */
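+	/*
+	 * No memory is fetched for the fill: the color comes from the
+	 * S2CF register set below, so base address and pitch stay zero.
+	 */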
+ node->node.GROUP4.B2R2_SBA = 0;
+ node->node.GROUP4.B2R2_STY =
+ (0 << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ fill_fmt |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL;
+ node->node.GROUP2.B2R2_S2CF = src_color;
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_input_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_img *src_img = &(req->user_req.src_img);
+ u32 src_pitch = 0;
+ /* horizontal and vertical scan order for out_buf */
+ enum b2r2_ty dst_hso = B2R2_TY_HSO_LEFT_TO_RIGHT;
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ u32 endianness = 0;
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ bool yuv_semi_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ struct b2r2_filter_spec *hf;
+ struct b2r2_filter_spec *vf;
+
+ bool use_h_filter = false;
+ bool use_v_filter = false;
+
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (((B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW) &
+ req->user_req.flags) != 0) {
+ setup_fill_input_stage(req, node, out_buf);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+ return;
+ }
+
+ if (src_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ src_pitch = get_pitch(cont, src_img->fmt, src_img->width);
+ } else {
+ src_pitch = src_img->pitch;
+ }
+
+ b2r2_log_info(cont->dev, "%s transform=%#010x\n",
+ __func__, req->user_req.transform);
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
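+	/*
+	 * The factors are source steps per destination pixel in 6.10
+	 * fixed point, e.g. mapping 100 source pixels onto 200
+	 * destination pixels gives (100 << 10) / 200 = 0x200, half a
+	 * source pixel per step, i.e. a 2x upscale.
+	 */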
+
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+
+ use_h_filter = h_scf != (1 << 10);
+ use_v_filter = v_scf != (1 << 10);
+
+ /* B2R2_BLT_FLAG_BLUR overrides any scaling filter. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+ /* Configure horizontal rescale */
+ if (h_scf != (1 << 10)) {
+ b2r2_log_info(cont->dev, "%s: Scaling horizontally by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ h_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+ /* Configure vertical rescale */
+ if (v_scf != (1 << 10)) {
+ b2r2_log_info(cont->dev, "%s: Scaling vertically by 0x%.8x"
+ "\ns(%d, %d)->d(%d, %d)\n", __func__,
+ v_scf, src_rect->width, src_rect->height,
+ dst_rect->width, dst_rect->height);
+ }
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+
+ /* Adjustments that depend on the source format */
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /*
+ * Setup input VMX to convert YVU to RGB 601 VIDEO
+ * Chroma components are swapped so
+ * it is YVU and not YUV.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ /*
+ * Set up IVMX.
+	 * For B2R2_BLT_FMT_24_BIT_YUV888 and
+ * B2R2_BLT_FMT_32_BIT_AYUV8888
+ * the color components are laid out in memory as V, U, Y, (A)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+
+ /*
+ * Re-arrange color components from VUY(A) to (A)YUV
+ * for input VMX to work on them further.
+ */
+ if (src_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ /*
+ * Luma handled in the same way
+ * for all YUV multi-buffer formats.
+ * Set luma rescale registers.
+ */
+ u32 rsf_luma = 0;
+ u32 rzi_luma = 0;
+
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_IVMX_ENABLED | B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_IVMX | B2R2_CIC_RESIZE_LUMA;
+
+ if (src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ }
+
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ node->node.GROUP10.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ rsf_luma |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rzi_luma |= B2R2_RZI_DEFAULT_HNB_REPEAT;
+
+ rsf_luma |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ rzi_luma |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;
+
+ node->node.GROUP10.B2R2_RSF = rsf_luma;
+ node->node.GROUP10.B2R2_RZI = rzi_luma;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
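+			/*
+			 * Halving the 6.10 step doubles the chroma scale:
+			 * a 1:1 luma copy (step 0x400) reads chroma with
+			 * step 0x200, upscaling it 2x to match luma.
+			 */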
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (v_scf >> 1) << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ vf = b2r2_filter_find(v_scf >> 1);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf >> 1);
+ use_h_filter = true;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+			/* Chrominance is the same size as luminance. */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
+ /* Select suitable filter for chroma */
+ hf = b2r2_filter_find(h_scf);
+ vf = b2r2_filter_find(v_scf);
+ use_h_filter = true;
+ use_v_filter = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ /*
+ * Set the filter control and rescale registers.
+ * GROUP9 registers are used for all single-buffer formats
+ * or for chroma in case of multi-buffer YUV formats.
+ * h/v_filter is now appropriately selected for chroma scaling,
+ * be it YUV multi-buffer, or single-buffer raster format.
+ * B2R2_BLT_FLAG_BLUR overrides any scaling filter.
+ */
+ if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
+ use_h_filter = true;
+ use_v_filter = true;
+ hf = b2r2_filter_blur();
+ vf = b2r2_filter_blur();
+ }
+
+ if (use_h_filter && hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_HFP = hf->h_coeffs_phys_addr;
+ }
+
+ if (use_v_filter && vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ node->node.GROUP9.B2R2_VFP = vf->v_coeffs_phys_addr;
+ }
+
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI |= rzi;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * Flip transform is done before potential rotation.
+ * This can be achieved with appropriate scan order.
+ * Transform stage will only do rotation.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ dst_hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ dst_hso | dst_vso;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
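+		 * E.g. for YUV420 planar with luma pitch P and height H,
+		 * Cb starts at base + P * H with pitch P / 2 and Cr
+		 * follows at Cb + (P / 2) * (H / 2).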
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ else
+ cb_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+
+ switch (src_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (src_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (src_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = src_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ src_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ src_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+			/* Chrominance has full resolution, same as luminance. */
+ chroma_pitch = src_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * src_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * src_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->src_resolved.physical_address +
+ src_pitch * src_img->height;
+ u32 chroma_pitch = src_pitch;
+
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ src_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->src_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, src_img->fmt) |
+ get_alpha_range(cont, src_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ if ((req->user_req.flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO = B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = req->clut_phys_addr;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_transform_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf)
+{
+ /* vertical scan order for out_buf */
+ enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+ enum b2r2_blt_transform transform = req->user_req.transform;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Scan order must be flipped otherwise contents will
+ * be mirrored vertically. Leftmost column of in_buf
+ * would become top instead of bottom row of out_buf.
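+		 * E.g. rotating a W x H buffer 90 degrees CCW maps source
+		 * pixel (x, y) to (y, W - 1 - x) in the H x W output.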
+ */
+ dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT | dst_vso;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/*
+static void setup_mask_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf,
+ struct b2r2_work_buf *in_buf);
+*/
+
+static void setup_dst_read_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *out_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 endianness = 0;
+ bool yuv_semi_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv_planar =
+ dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ u32 dst_pitch = 0;
+ struct b2r2_control *cont = req->instance->control;
+
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
+ } else {
+ dst_pitch = dst_img->pitch;
+ }
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ /* Adjustments that depend on the destination format */
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ /*
+ * Setup input VMX to convert YVU to RGB 601 VIDEO
+ * Chroma components are swapped
+ * so it is YVU and not YUV.
+ */
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ /*
+ * Set up IVMX.
+	 * For B2R2_BLT_FMT_24_BIT_YUV888 and
+ * B2R2_BLT_FMT_32_BIT_AYUV8888
+ * the color components are laid out in memory as V, U, Y, (A)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be as U, Y, V, (A)
+ * with U at the first byte.
+ */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO;
+
+ /*
+ * Re-arrange color components from VUY(A) to (A)YUV
+ * for input VMX to work on them further.
+ */
+ if (dst_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
+ /* Set up IVMX */
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+
+ if (dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_img->fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO;
+ }
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
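+			/*
+			 * 1 << 9 is 0.5 in 6.10 fixed point: subsampled
+			 * chroma is read at half step, i.e. upscaled 2x
+			 * into the full-resolution work buffer.
+			 */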
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+			/* Chrominance is the same size as luminance. */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ default:
+ break;
+ }
+ /* Set the filter control and rescale registers for chroma */
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ node->node.GROUP9.B2R2_RSF |= rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL | B2R2_CIC_RESIZE_CHROMA;
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ if (yuv_planar) {
+ /*
+ * Set up chrominance buffers on source 1 and 2,
+ * luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ */
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_img->fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP3.B2R2_SBA = cr_addr;
+ node->node.GROUP3.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP4.B2R2_SBA = cb_addr;
+ node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_SOURCE_3;
+ } else if (yuv_semi_planar) {
+ /*
+ * Set up chrominance buffer on source 2, luminance on source 3.
+ * dst_pitch and physical_address apply to luminance,
+ * corresponding chrominance values have to be derived.
+ * U and V are interleaved at half the luminance resolution,
+ * which makes the pitch of the UV plane equal
+ * to luminance pitch.
+ */
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+
+ node->node.GROUP4.B2R2_SBA = chroma_addr;
+ node->node.GROUP4.B2R2_STY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP5.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
+ } else {
+ /* single buffer format */
+ node->node.GROUP4.B2R2_SBA = req->dst_resolved.physical_address;
+ node->node.GROUP4.B2R2_STY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ endianness;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_blend_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *bg_buf,
+ struct b2r2_work_buf *fg_buf)
+{
+ u32 global_alpha = req->user_req.global_alpha;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ node->node.GROUP0.B2R2_ACK = 0;
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)) {
+ /* Some kind of blending needs to be done. */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT)
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_MODE_BLEND_PREMULT;
+
+ /*
+ * global_alpha register accepts 0..128 range,
+ * global_alpha in the request is 0..255, remap needed.
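+		 * E.g. 255 maps to 128 (fully opaque) and 128 maps to 64;
+		 * only full opacity is represented exactly.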
+ */
+ if (req->user_req.flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
+ if (global_alpha == 255)
+ global_alpha = 128;
+ else
+ global_alpha >>= 1;
+ } else {
+ /*
+ * Use solid global_alpha
+ * if global alpha blending is not set.
+ */
+ global_alpha = 128;
+ }
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << (B2R2_ACK_GALPHA_ROPID_SHIFT);
+
+ /* Set background on SRC1 channel */
+ node->node.GROUP3.B2R2_SBA = bg_buf->phys_addr;
+ node->node.GROUP3.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set foreground on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /* Set target buffer */
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_1 |
+ B2R2_CIC_SOURCE_2;
+ } else {
+ /*
+ * No blending, foreground goes on SRC2. No global alpha.
+ * EMACSOC TODO: The blending stage should be skipped altogether
+ * if no blending is to be done. Probably could go directly from
+ * transform to writeback.
+ */
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+
+ node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (B2R2_GENERIC_WORK_BUF_PITCH <<
+ B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+ }
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+static void setup_writeback_stage(const struct b2r2_blt_request *req,
+ struct b2r2_node *node,
+ struct b2r2_work_buf *in_buf)
+{
+ const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
+ const enum b2r2_blt_fmt dst_fmt = dst_img->fmt;
+ const bool yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+
+ const bool yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ const u32 group4_b2r2_sty =
+ (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ B2R2_GENERIC_WORK_BUF_FMT |
+ B2R2_TY_ALPHA_RANGE_255 |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ u32 dst_dither = 0;
+ u32 dst_pitch = 0;
+ u32 endianness = 0;
+
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (dst_img->pitch == 0) {
+ /* Determine pitch based on format and width of the image. */
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
+ } else
+ dst_pitch = dst_img->pitch;
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DITHER) != 0)
+ dst_dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ /* Set target buffer(s) */
+ if (yuv_planar_dst) {
+ /*
+ * three nodes required to write the output.
+ * Luma, blue chroma and red chroma.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED |
+ B2R2_INS_IVMX_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW |
+ B2R2_CIC_IVMX;
+
+ u32 cb_addr = 0;
+ u32 cr_addr = 0;
+ u32 chroma_pitch = 0;
+ bool swapped_chroma =
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
+
+ if (swapped_chroma)
+ cr_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ else
+ cb_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ (dst_img->height >> 1);
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
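+			/*
+			 * 2 << 10 is 2.0 in 6.10 fixed point: two
+			 * work-buffer pixels are stepped per output pixel,
+			 * downscaling chroma 2x on write-out.
+			 */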
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ chroma_pitch = dst_pitch >> 1;
+ if (swapped_chroma)
+ cb_addr = cr_addr + chroma_pitch *
+ dst_img->height;
+ else
+ cr_addr = cb_addr + chroma_pitch *
+ dst_img->height;
+ /*
+ * YUV422 or YVU422
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ chroma_pitch = dst_pitch;
+ cr_addr =
+ cb_addr + chroma_pitch * dst_img->height;
+			/*
+			 * No scaling required since
+			 * chrominance is not subsampled.
+			 */
+			/* fall through */
+ default:
+ break;
+ }
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Blue chroma (U-component)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cb_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+
+ /*
+ * Red chroma (V-component)
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it was
+ * CR_NOT_CB.
+ */
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = cr_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TTY_CB_NOT_CR |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ node->node.GROUP15.B2R2_VMX0 = B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 = B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 = B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 = B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+ if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else if (yuv_semi_planar_dst) {
+ /*
+ * two nodes required to write the output.
+ * One node for luma and one for interleaved chroma
+ * components.
+ */
+ u32 fctl = 0;
+ u32 rsf = 0;
+ const u32 group0_b2r2_ins =
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED |
+ B2R2_INS_IVMX_ENABLED;
+ const u32 group0_b2r2_cic =
+ B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_CLIP_WINDOW |
+ B2R2_CIC_IVMX;
+
+ u32 chroma_addr = req->dst_resolved.physical_address +
+ dst_pitch * dst_img->height;
+ u32 chroma_pitch = dst_pitch;
+ enum b2r2_native_fmt dst_native_fmt =
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR) {
+ /*
+ * Chrominance is always half the luminance size
+ * so chrominance resizer is always active.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ } else {
+ /*
+ * YUV422
+ * Chrominance is always half the luminance size
+ * only in horizontal direction.
+ */
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
+
+ rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
+ rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
+ rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
+ rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
+ }
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+ }
+
+ /* bypass ALU, no blending here. Handled in its own stage. */
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS = group0_b2r2_ins;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic;
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+
+ /* Chroma (UV-components)*/
+ node = node->next;
+ node->node.GROUP1.B2R2_TBA = chroma_addr;
+ node->node.GROUP1.B2R2_TTY =
+ (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ dst_native_fmt | alpha_range |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ } else {
+ node->node.GROUP15.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO;
+ node->node.GROUP15.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO;
+ }
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS =
+ group0_b2r2_ins | B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic |
+ B2R2_CIC_FILTER_CONTROL |
+ B2R2_CIC_RESIZE_CHROMA;
+
+ /* Set the filter control and rescale registers */
+ node->node.GROUP8.B2R2_FCTL = fctl;
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ } else {
+ /* single buffer target */
+
+ /* Set up OVMX */
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 = B2R2_VMX0_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX1 = B2R2_VMX1_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX2 = B2R2_VMX2_RGB_TO_BGR;
+ node->node.GROUP16.B2R2_VMX3 = B2R2_VMX3_RGB_TO_BGR;
+ break;
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO;
+ break;
+ case B2R2_BLT_FMT_24_BIT_YUV888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888: /* fall through */
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* fall through */
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX;
+ node->node.GROUP16.B2R2_VMX0 =
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX1 =
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX2 =
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO;
+ node->node.GROUP16.B2R2_VMX3 =
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO;
+
+ /*
+ * Re-arrange color components from (A)YUV to VUY(A)
+ * when bytes are stored in memory.
+ */
+ if (dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
+ node->node.GROUP1.B2R2_TTY =
+ (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
+ B2R2_TY_HSO_LEFT_TO_RIGHT |
+ B2R2_TY_VSO_TOP_TO_BOTTOM |
+ dst_dither |
+ endianness;
+
+ node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
+ B2R2_INS_RECT_CLIP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_SOURCE_2 | B2R2_CIC_CLIP_WINDOW;
+
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = to_RGB888(cont, req->user_req.src_color,
+ req->user_req.src_img.fmt);
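+			/*
+			 * Writing the same color to KEY1 and KEY2 collapses
+			 * the match-if-between range to an exact match.
+			 */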
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ /* Set source buffer on SRC2 channel */
+ node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
+ node->node.GROUP4.B2R2_STY = group4_b2r2_sty;
+ }
+ /*
+ * Writeback is the last stage. Terminate the program chain
+ * to prevent out-of-control B2R2 execution.
+ */
+ node->node.GROUP0.B2R2_NIP = 0;
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
+
+/*
+ * Public functions
+ */
+void b2r2_generic_init(struct b2r2_control *cont)
+{
+
+}
+
+void b2r2_generic_exit(struct b2r2_control *cont)
+{
+
+}
+
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count)
+{
+ /*
+	 * Need at least 4 nodes: read or fill input, read dst, blend
+	 * and write back the result.
+	 */
+ u32 n_nodes = 4;
+ /* Need at least 2 bufs, 1 for blend output and 1 for input */
+ u32 n_work_bufs = 2;
+ /* Horizontal and vertical scaling factors in 6.10 fixed point format */
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ enum b2r2_blt_fmt dst_fmt = 0;
+ bool is_src_fill = false;
+ bool yuv_planar_dst;
+ bool yuv_semi_planar_dst;
+ struct b2r2_blt_rect src_rect;
+ struct b2r2_blt_rect dst_rect;
+	struct b2r2_control *cont;
+
+	/* req must be validated before cont can be derived from it. */
+	if (req == NULL)
+		return -EINVAL;
+
+	cont = req->instance->control;
+
+	if (work_buf_width == NULL || work_buf_height == NULL ||
+			work_buf_count == NULL || node_count == NULL) {
+		b2r2_log_warn(cont->dev, "%s: Invalid in or out pointers:\n"
+			"work_buf_width=0x%p work_buf_height=0x%p "
+			"work_buf_count=0x%p\n"
+			"node_count=0x%p.\n",
+			__func__,
+			work_buf_width, work_buf_height,
+			work_buf_count,
+			node_count);
+		return -EINVAL;
+	}
+
+ dst_fmt = req->user_req.dst_img.fmt;
+
+ is_src_fill = (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ yuv_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
+ yuv_semi_planar_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+
+ *node_count = 0;
+ *work_buf_width = 0;
+ *work_buf_height = 0;
+ *work_buf_count = 0;
+
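+	/*
+	 * Rotation runs as a separate stage and needs one extra node
+	 * and one extra work buffer.
+	 */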
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ n_nodes++;
+ n_work_bufs++;
+ }
+
+ if ((yuv_planar_dst || yuv_semi_planar_dst) &&
+ (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source_fill_raw"
+ " and multi-buffer destination.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) != 0 &&
+ (req->user_req.flags & B2R2_BLT_FLAG_DEST_COLOR_KEY)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source and "
+ "destination color keying.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_COLOR_KEY |
+ B2R2_BLT_FLAG_DEST_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
+ "source_fill and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags &
+ (B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND |
+ B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND)) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
+ "blending and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) &&
+ (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: source mask"
+ "and color keying.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, "
+ "destination color keying.\n",
+ __func__);
+ return -ENOSYS;
+ }
+
+	if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_src =
+ src_fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ if (yuv_src || src_fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ src_fmt == B2R2_BLT_FMT_8_BIT_A8) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source "
+ "color keying with YUV or pure alpha "
+ "formats.\n", __func__);
+ return -ENOSYS;
+ }
+ }
+
+ /* Check for invalid dimensions that would hinder scale calculations */
+ src_rect = req->user_req.src_rect;
+ dst_rect = req->user_req.dst_rect;
+ /* Check for invalid src_rect unless src_fill is enabled */
+ if (!is_src_fill && (src_rect.x < 0 || src_rect.y < 0 ||
+ src_rect.x + src_rect.width > req->user_req.src_img.width ||
+ src_rect.y + src_rect.height > req->user_req.src_img.height)) {
+ b2r2_log_warn(cont->dev, "%s: src_rect outside src_img:\n"
+ "src(x,y,w,h)=(%d, %d, %d, %d) "
+ "src_img(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.x, src_rect.y, src_rect.width, src_rect.height,
+ req->user_req.src_img.width,
+ req->user_req.src_img.height);
+ return -EINVAL;
+ }
+
+ if (!is_src_fill && (src_rect.width <= 0 || src_rect.height <= 0)) {
+ b2r2_log_warn(cont->dev, "%s: Invalid source dimensions:\n"
+ "src(w,h)=(%d, %d).\n",
+ __func__,
+ src_rect.width, src_rect.height);
+ return -EINVAL;
+ }
+
+ if (dst_rect.width <= 0 || dst_rect.height <= 0) {
+ b2r2_log_warn(cont->dev, "%s: Invalid dest dimensions:\n"
+ "dst(w,h)=(%d, %d).\n",
+ __func__,
+ dst_rect.width, dst_rect.height);
+ return -EINVAL;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for invalid image params */
+ if (!is_src_fill && validate_buf(cont, &(req->user_req.src_img),
+ &(req->src_resolved)))
+ return -EINVAL;
+
+ if (validate_buf(cont, &(req->user_req.dst_img), &(req->dst_resolved)))
+ return -EINVAL;
+
+ if (is_src_fill) {
+ /*
+ * Params correct for a source fill operation.
+ * No need for further checking.
+ */
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d "
+ "buf_count=%d node_count=%d\n", __func__,
+ *work_buf_width, *work_buf_height,
+ *work_buf_count, *node_count);
+ return 0;
+ }
+
+ /*
+ * Calculate scaling factors, all transform enum values
+ * that include rotation have the CCW_ROT_90 bit set.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect.width << 10) / dst_rect.height;
+ v_scf = (src_rect.height << 10) / dst_rect.width;
+ } else {
+ h_scf = (src_rect.width << 10) / dst_rect.width;
+ v_scf = (src_rect.height << 10) / dst_rect.height;
+ }
+
+	/*
+	 * Check for degenerate/out-of-range scaling factors.
+	 * 0x7C00 is 31.0 in 6.10 fixed point, the largest source
+	 * increment accepted here.
+	 */
+ if (h_scf <= 0 || v_scf <= 0 || h_scf > 0x7C00 || v_scf > 0x7C00) {
+ b2r2_log_warn(cont->dev,
+ "%s: Dimensions result in degenerate or "
+ "out of range scaling:\n"
+ "src(w,h)=(%d, %d) "
+ "dst(w,h)=(%d,%d).\n"
+ "h_scf=0x%.8x, v_scf=0x%.8x\n",
+ __func__,
+ src_rect.width, src_rect.height,
+ dst_rect.width, dst_rect.height,
+ h_scf, v_scf);
+ return -EINVAL;
+ }
+
+ if (yuv_planar_dst)
+ n_nodes += 2;
+ else if (yuv_semi_planar_dst)
+ n_nodes++;
+
+ *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH;
+ *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
+ *work_buf_count = n_work_bufs;
+ *node_count = n_nodes;
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d buf_count=%d "
+ "node_count=%d\n", __func__, *work_buf_width,
+ *work_buf_height, *work_buf_count, *node_count);
+ return 0;
+}
+
+/*
+ *
+ */
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count)
+{
+ struct b2r2_node *node = NULL;
+ struct b2r2_work_buf *in_buf = NULL;
+ struct b2r2_work_buf *out_buf = NULL;
+ struct b2r2_work_buf *empty_buf = NULL;
+ struct b2r2_control *cont = req->instance->control;
+
+#ifdef B2R2_GENERIC_DEBUG
+ u32 needed_bufs = 0;
+ u32 needed_nodes = 0;
+ s32 work_buf_width = 0;
+ s32 work_buf_height = 0;
+ u32 n_nodes = 0;
+ int invalid_req = b2r2_generic_analyze(req, &work_buf_width,
+ &work_buf_height, &needed_bufs,
+ &needed_nodes);
+ if (invalid_req < 0) {
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid request supplied, ec=%d\n",
+ __func__, invalid_req);
+ return -EINVAL;
+ }
+
+ node = first;
+
+ while (node != NULL) {
+ n_nodes++;
+ node = node->next;
+ }
+ if (n_nodes < needed_nodes) {
+ b2r2_log_warn(cont->dev, "%s: Not enough nodes %d < %d.\n",
+ __func__, n_nodes, needed_nodes);
+ return -EINVAL;
+ }
+
+ if (buf_count < needed_bufs) {
+ b2r2_log_warn(cont->dev, "%s: Not enough buffers %d < %d.\n",
+ __func__, buf_count, needed_bufs);
+ return -EINVAL;
+ }
+
+#endif
+
+ reset_nodes(cont, first);
+ node = first;
+ empty_buf = tmp_bufs;
+ out_buf = empty_buf;
+ empty_buf++;
+	/* Prepare input tile: color fill or read from src */
+ setup_input_stage(req, node, out_buf);
+ in_buf = out_buf;
+ out_buf = empty_buf;
+ empty_buf++;
+ node = node->next;
+
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ setup_transform_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ /* EMACSOC TODO: mask */
+ /*
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ setup_mask_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ out_buf = empty_buf++;
+ }
+ */
+ /* Read the part of destination that will be updated */
+ setup_dst_read_stage(req, node, out_buf);
+ node = node->next;
+ setup_blend_stage(req, node, out_buf, in_buf);
+ node = node->next;
+ in_buf = out_buf;
+ setup_writeback_stage(req, node, in_buf);
+ return 0;
+}
+
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area)
+{
+ /*
+ * Nodes come in the following order: <input stage>, [transform],
+ * [src_mask], <dst_read>, <blend>, <writeback>
+ */
+ struct b2r2_node *node = first;
+ const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
+ const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
+ const enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt;
+ bool yuv_multi_buffer_src =
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ const enum b2r2_blt_fmt dst_fmt = req->user_req.dst_img.fmt;
+ const bool yuv_multi_buffer_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ s32 h_scf = 1 << 10;
+ s32 v_scf = 1 << 10;
+ s32 src_x = 0;
+ s32 src_y = 0;
+ s32 src_w = 0;
+ s32 src_h = 0;
+ u32 b2r2_rzi = 0;
+ s32 clip_top = 0;
+ s32 clip_left = 0;
+ s32 clip_bottom = req->user_req.dst_img.height - 1;
+ s32 clip_right = req->user_req.dst_img.width - 1;
+ /* Dst coords inside the dst_rect, not the buffer */
+ s32 dst_x = dst_rect_area->x;
+ s32 dst_y = dst_rect_area->y;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ h_scf = (src_rect->width << 10) / dst_rect->height;
+ v_scf = (src_rect->height << 10) / dst_rect->width;
+ } else {
+ h_scf = (src_rect->width << 10) / dst_rect->width;
+ v_scf = (src_rect->height << 10) / dst_rect->height;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * Normally the inverse transform for 90 degree rotation
+ * is given by:
+ * | 0 1| |x| | y|
+ * | | X | | = | |
+ * |-1 0| |y| |-x|
+ * but screen coordinates are flipped in y direction
+ * (compared to usual Cartesian coordinates), hence the offsets.
+ */
+ src_x = (dst_rect->height - dst_y - dst_rect_area->height) *
+ h_scf;
+ src_y = dst_x * v_scf;
+ src_w = dst_rect_area->height * h_scf;
+ src_h = dst_rect_area->width * v_scf;
+ } else {
+ src_x = dst_x * h_scf;
+ src_y = dst_y * v_scf;
+ src_w = dst_rect_area->width * h_scf;
+ src_h = dst_rect_area->height * v_scf;
+ }
+
+ b2r2_rzi |= ((src_x & 0x3ff) << B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
+
+ /*
+ * src_w must contain all the pixels that contribute
+ * to a particular tile.
+ * ((x + 0x3ff) >> 10) is equivalent to ceiling(x),
+ * expressed in 6.10 fixed point format.
+	 * Every destination tile maps to a certain area in the source
+ * rectangle. The area in source will most likely not be a rectangle
+ * with exact integer dimensions whenever arbitrary scaling is involved.
+ * Consider the following example.
+ * Suppose, that width of the current destination tile maps
+ * to 1.7 pixels in source, starting at x == 5.4, as calculated
+ * using the scaling factor.
+ * This means that while the destination tile is written,
+ * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1
+ * Consequently, color from 3 pixels (x == 5, 6 and 7)
+ * needs to be read from source.
+ * The formula below the comment yields:
+ * ceil(0.4 + 1.7) == ceil(2.1) == 3
+ * (src_x & 0x3ff) is the fractional part of src_x,
+ * which is expressed in 6.10 fixed point format.
+ * Thus, width of the source area should be 3 pixels wide,
+ * starting at x == 5.
+ * However, the reading should not start at x == 5.0
+ * but a bit inside, namely x == 5.4
+ * The B2R2_RZI register is used to instruct the HW to do so.
+ * It contains the fractional part that will be added to
+ * the first pixel coordinate, before incrementing the current source
+ * coordinate with the step specified in B2R2_RSF register.
+ * The same applies to scaling in vertical direction.
+ */
+ src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ /*
+ * EMACSOC TODO: Remove this debug clamp, once tile size
+ * is taken into account in generic_analyze()
+ */
+ if (src_w > 128)
+ src_w = 128;
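+	/*
+	 * 128 matches B2R2_RESCALE_MAX_WIDTH, the width of the strips
+	 * that the rescaler works in.
+	 */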
+
+ src_x >>= 10;
+ src_y >>= 10;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ src_x = src_rect->width - src_x - src_w;
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ src_y = src_rect->height - src_y - src_h;
+
+ /*
+ * Translate the src/dst_rect coordinates into true
+ * src/dst_buffer coordinates
+ */
+ src_x += src_rect->x;
+ src_y += src_rect->y;
+
+ dst_x += dst_rect->x;
+ dst_y += dst_rect->y;
+
+ /*
+ * Clamp the src coords to buffer dimensions
+ * to prevent illegal reads.
+ */
+ if (src_x < 0)
+ src_x = 0;
+
+ if (src_y < 0)
+ src_y = 0;
+
+ if ((src_x + src_w) > req->user_req.src_img.width)
+ src_w = req->user_req.src_img.width - src_x;
+
+ if ((src_y + src_h) > req->user_req.src_img.height)
+ src_h = req->user_req.src_img.height - src_y;
+
+
+ /* The input node */
+ if (yuv_multi_buffer_src) {
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP10.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP10.B2R2_RZI |= b2r2_rzi;
+
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ switch (src_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((src_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((src_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
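+			/*
+			 * E.g. src_w == 7 gives ((7 + 1) >> 1) == 4 chroma
+			 * pixels, whereas (7 >> 1) == 3 would only cover
+			 * luma width 6.
+			 */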
+ if (src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ node->node.GROUP9.B2R2_RZI |= (b2r2_rzi >> 1) &
+ ((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+ * only in horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((src_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ if (src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ src_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ node->node.GROUP9.B2R2_RZI |=
+ (((src_x & 0x3ff) >> 1) <<
+ B2R2_RZI_HSRC_INIT_SHIFT) |
+ ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP4.B2R2_SSZ;
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ break;
+ default:
+ break;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY =
+ ((src_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((src_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Clear and set only the SRC_INIT bits */
+ node->node.GROUP9.B2R2_RZI &=
+ ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT));
+ node->node.GROUP9.B2R2_RZI |= b2r2_rzi;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ /*
+ * dst_rect_area coordinates are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped.
+ * Horizontal and vertical flips are accomplished with
+ * suitable scanning order while writing
+ * to the temporary buffer.
+ */
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->width - 1) & 0xffff) <<
+ B2R2_XY_X_SHIFT;
+ }
+
+ if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) {
+ node->node.GROUP1.B2R2_TXY |=
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+ }
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (req->user_req.flags &
+ (B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
+ /*
+ * Scan order for source fill should always be left-to-right
+ * and top-to-bottom. Fill the input tile from top left.
+ */
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP1.B2R2_TSZ;
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Input node done.\n", __func__);
+ }
+
+ /* Transform */
+ if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) {
+ /*
+ * Transform node operates on temporary buffers.
+ * Content always at top left, but scanning order
+ * has to be flipped during rotation.
+ * Width and height need to be considered as well, since
+ * a tile may not necessarily be filled completely.
+ * dst_rect_area dimensions are specified
+ * after potential rotation.
+ * Input is read before rotation, hence the width and height
+ * need to be swapped on src.
+ */
+ node = node->next;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ /* Bottom line written first */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_rect_area->height - 1) & 0xffff) <<
+ B2R2_XY_Y_SHIFT;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Tranform node done.\n", __func__);
+ }
+ }
+
+ /* Source mask */
+ if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) {
+ node = node->next;
+ /*
+ * Same coords for mask as for the input stage.
+ * Should the mask be transformed together with source?
+ * EMACSOC TODO: Apply mask before any
+ * transform/scaling is done.
+ * Otherwise it will be dst_ not src_mask.
+ */
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Source mask node done.\n", __func__);
+ }
+ }
+
+ /* dst_read */
+ if (yuv_multi_buffer_dst) {
+ s32 dst_w = dst_rect_area->width;
+ s32 dst_h = dst_rect_area->height;
+ bool yuv420_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE;
+
+ bool yuv422_dst =
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
+ node = node->next;
+ /* Luma on SRC3 */
+ node->node.GROUP5.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP5.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (yuv420_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Chroma is half the size of luma. Must round up
+ * the chroma size to handle cases when luma size is not
+ * divisible by 2.
+ * E.g. luma width==7 requires chroma width==4.
+ * Chroma width==7/2==3 is only enough
+ * for luma width==6.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU420_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (yuv422_dst) {
+ /*
+ * Chroma goes on SRC2 and potentially on SRC1.
+ * Now chroma is half the size of luma
+ * only in horizontal direction.
+ * Same rounding applies as for 420 formats above,
+ * except it is only done horizontally.
+ */
+ node->node.GROUP4.B2R2_SXY =
+ (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt ==
+ B2R2_BLT_FMT_YVU422_PACKED_PLANAR) {
+ node->node.GROUP3.B2R2_SXY =
+ node->node.GROUP4.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ =
+ node->node.GROUP4.B2R2_SSZ;
+ }
+ } else if (dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) {
+ /*
+ * Chroma goes on SRC2 and SRC1.
+ * It is the same size as luma.
+ */
+ node->node.GROUP4.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP4.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ node->node.GROUP3.B2R2_SXY = node->node.GROUP5.B2R2_SXY;
+ node->node.GROUP3.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ;
+ }
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ } else {
+ node = node->next;
+ node->node.GROUP4.B2R2_SXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ }
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s dst_read node done.\n", __func__);
+ }
+
+ /* blend */
+ node = node->next;
+ node->node.GROUP3.B2R2_SXY = 0;
+ node->node.GROUP3.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ /* contents of the foreground temporary buffer always at top left */
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP1.B2R2_TXY = 0;
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Blend node done.\n", __func__);
+ }
+
+ /* writeback */
+ node = node->next;
+ if ((req->user_req.flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0) {
+ clip_left = req->user_req.dst_clip_rect.x;
+ clip_top = req->user_req.dst_clip_rect.y;
+ clip_right = clip_left + req->user_req.dst_clip_rect.width - 1;
+ clip_bottom = clip_top + req->user_req.dst_clip_rect.height - 1;
+ }
+ /*
+ * Clamp the dst clip rectangle to buffer dimensions to prevent
+ * illegal writes. An illegal clip rectangle, e.g. outside the
+ * buffer will be ignored, resulting in nothing being clipped.
+ */
+ if (clip_left < 0 || req->user_req.dst_img.width <= clip_left)
+ clip_left = 0;
+
+ if (clip_top < 0 || req->user_req.dst_img.height <= clip_top)
+ clip_top = 0;
+
+ if (clip_right < 0 || req->user_req.dst_img.width <= clip_right)
+ clip_right = req->user_req.dst_img.width - 1;
+
+ if (clip_bottom < 0 || req->user_req.dst_img.height <= clip_bottom)
+ clip_bottom = req->user_req.dst_img.height - 1;
+
+ /*
+ * Only allow writing inside the clip rect.
+ * INTNL bit in B2R2_CWO should be zero.
+ */
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
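+	/*
+	 * Illustrative example: a clip rect of (x,y,w,h) == (10, 20, 100, 50)
+	 * gives CWO == (20 << 16) | 10 and CWS == (69 << 16) | 109, i.e.
+	 * the inclusive top-left and bottom-right corners.
+	 */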
+
+ if (yuv_multi_buffer_dst) {
+ const s32 dst_w = dst_rect_area->width;
+ const s32 dst_h = dst_rect_area->height;
+ int i = 0;
+ /* Number of nodes required to write chroma output */
+ int n_nodes = 1;
+ if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
+ dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR)
+ n_nodes = 2;
+
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ /* Luma (Y-component) */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Writeback luma node done.\n", __func__);
+ }
+
+ node = node->next;
+
+ /*
+ * Chroma components. 1 or 2 nodes
+ * for semi-planar or planar buffer respectively.
+ */
+		for (i = 0; i < n_nodes && node != NULL; ++i) {
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Chroma is half the size of luma.
+ * Must round up the chroma size to handle
+ * cases when luma size is not divisible by 2.
+ * E.g. luma_width==7 requires chroma_width==4.
+ * Chroma_width==7/2==3 is only enough
+ * for luma_width==6.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ (((dst_y & 0xffff) >> 1) <<
+ B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((((dst_h + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ /*
+ * Now chroma is half the size of luma only
+ * in horizontal direction.
+ * Same rounding applies as
+ * for 420 formats above, except it is only
+ * done horizontally.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ (((dst_x & 0xffff) >> 1) <<
+ B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((((dst_w + 1) & 0xfff) >> 1) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ /*
+ * Chroma has the same resolution as luma.
+ */
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_w & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_h & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ break;
+ default:
+ break;
+ }
+
+ node->node.GROUP6.B2R2_CWO =
+ ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) |
+ ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT);
+ node->node.GROUP6.B2R2_CWS =
+ ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) |
+ ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback chroma "
+ "node %d of %d done.\n",
+ __func__, i + 1, n_nodes);
+ }
+
+ node = node->next;
+ }
+ } else {
+ node->node.GROUP4.B2R2_SXY = 0;
+ node->node.GROUP4.B2R2_SSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT);
+ node->node.GROUP1.B2R2_TSZ =
+ ((dst_rect_area->width & 0xfff) <<
+ B2R2_SZ_WIDTH_SHIFT) |
+ ((dst_rect_area->height & 0xfff) <<
+ B2R2_SZ_HEIGHT_SHIFT);
+
+ if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
+ dst_rect_area->y == 0) {
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback node done.\n",
+ __func__);
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
+}
diff --git a/drivers/video/b2r2/b2r2_generic.h b/drivers/video/b2r2/b2r2_generic.h
new file mode 100644
index 00000000000..3b22f654deb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_generic.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 generic path. Full coverage of the user interface,
+ * but a non-optimized implementation, intended for fallback purposes.
+ *
+ * Author: Maciej Socha <maciej.socha@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_GENERIC_H
+#define _LINUX_VIDEO_B2R2_GENERIC_H
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+/**
+ * b2r2_generic_init()
+ */
+void b2r2_generic_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_generic_exit()
+ */
+void b2r2_generic_exit(struct b2r2_control *cont);
+
+/**
+ * b2r2_generic_analyze()
+ */
+int b2r2_generic_analyze(const struct b2r2_blt_request *req,
+ s32 *work_buf_width,
+ s32 *work_buf_height,
+ u32 *work_buf_count,
+ u32 *node_count);
+/**
+ * b2r2_generic_configure()
+ */
+int b2r2_generic_configure(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_work_buf *tmp_bufs,
+ u32 buf_count);
+/**
+ * b2r2_generic_set_areas()
+ */
+void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
+ struct b2r2_node *first,
+ struct b2r2_blt_rect *dst_rect_area);
+#endif
diff --git a/drivers/video/b2r2/b2r2_global.h b/drivers/video/b2r2/b2r2_global.h
new file mode 100644
index 00000000000..38cf74bb753
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_global.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 global definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_GLOBAL_H
+#define __B2R2_GLOBAL_H
+
+/** Sources involved */
+
+struct b2r2_system {
+ unsigned int B2R2_NIP;
+ unsigned int B2R2_CIC;
+ unsigned int B2R2_INS;
+ unsigned int B2R2_ACK;
+};
+
+struct b2r2_target {
+ unsigned int B2R2_TBA;
+ unsigned int B2R2_TTY;
+ unsigned int B2R2_TXY;
+ unsigned int B2R2_TSZ;
+};
+
+struct b2r2_color_fill {
+ unsigned int B2R2_S1CF;
+ unsigned int B2R2_S2CF;
+};
+
+struct b2r2_src_config {
+ unsigned int B2R2_SBA;
+ unsigned int B2R2_STY;
+ unsigned int B2R2_SXY;
+ unsigned int B2R2_SSZ;
+};
+
+struct b2r2_clip {
+ unsigned int B2R2_CWO;
+ unsigned int B2R2_CWS;
+};
+
+struct b2r2_color_key {
+ unsigned int B2R2_KEY1;
+ unsigned int B2R2_KEY2;
+};
+
+struct b2r2_clut {
+ unsigned int B2R2_CCO;
+ unsigned int B2R2_CML;
+};
+
+struct b2r2_rsz_pl_mask {
+ unsigned int B2R2_FCTL;
+ unsigned int B2R2_PMK;
+};
+
+struct b2r2_Cr_luma_rsz {
+ unsigned int B2R2_RSF;
+ unsigned int B2R2_RZI;
+ unsigned int B2R2_HFP;
+ unsigned int B2R2_VFP;
+};
+
+struct b2r2_flikr_filter {
+ unsigned int B2R2_FF0;
+ unsigned int B2R2_FF1;
+ unsigned int B2R2_FF2;
+ unsigned int B2R2_FF3;
+};
+
+struct b2r2_xyl {
+ unsigned int B2R2_XYL;
+ unsigned int B2R2_XYP;
+};
+
+struct b2r2_sau {
+ unsigned int B2R2_SAR;
+ unsigned int B2R2_USR;
+};
+
+struct b2r2_vm {
+ unsigned int B2R2_VMX0;
+ unsigned int B2R2_VMX1;
+ unsigned int B2R2_VMX2;
+ unsigned int B2R2_VMX3;
+};
+
+struct b2r2_link_list {
+
+ struct b2r2_system GROUP0;
+ struct b2r2_target GROUP1;
+ struct b2r2_color_fill GROUP2;
+ struct b2r2_src_config GROUP3;
+ struct b2r2_src_config GROUP4;
+ struct b2r2_src_config GROUP5;
+ struct b2r2_clip GROUP6;
+ struct b2r2_clut GROUP7;
+ struct b2r2_rsz_pl_mask GROUP8;
+ struct b2r2_Cr_luma_rsz GROUP9;
+ struct b2r2_Cr_luma_rsz GROUP10;
+ struct b2r2_flikr_filter GROUP11;
+ struct b2r2_color_key GROUP12;
+ struct b2r2_xyl GROUP13;
+ struct b2r2_sau GROUP14;
+ struct b2r2_vm GROUP15;
+ struct b2r2_vm GROUP16;
+
+ unsigned int B2R2_RESERVED[2];
+};
+
+
+#endif /* !defined(__B2R2_GLOBAL_H) */
diff --git a/drivers/video/b2r2/b2r2_hw.h b/drivers/video/b2r2/b2r2_hw.h
new file mode 100644
index 00000000000..d492168913a
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_hw.h
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 hw definitions
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef B2R2_HW_H__
+#define B2R2_HW_H__
+
+#include <linux/bitops.h>
+
+/* Scaling works in strips 128 pixels wide */
+#define B2R2_RESCALE_MAX_WIDTH 128
+
+/* Rotation works in strips 16 pixels wide */
+#define B2R2_ROTATE_MAX_WIDTH 16
+
+/* B2R2 color formats */
+#define B2R2_COLOR_FORMAT_SHIFT 16
+enum b2r2_native_fmt {
+ /* RGB formats */
+ B2R2_NATIVE_RGB565 = 0x00 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_RGB888 = 0x01 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8565 = 0x04 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB8888 = 0x05 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB1555 = 0x06 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ARGB4444 = 0x07 << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* YCbCr formats */
+ B2R2_NATIVE_YCBCR888 = 0x10 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR422R = 0x12 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_AYCBCR8888 = 0x15 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MB = 0x14 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_R2B = 0x16 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YCBCR42X_MBN = 0x0e << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* CLUT formats */
+ B2R2_NATIVE_CLUT2 = 0x09 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_CLUT8 = 0x0b << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT44 = 0x0c << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_ACLUT88 = 0x0d << B2R2_COLOR_FORMAT_SHIFT,
+
+ /* Misc. formats */
+ B2R2_NATIVE_A1 = 0x18 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_A8 = 0x19 << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_YUV = 0x1e << B2R2_COLOR_FORMAT_SHIFT,
+ B2R2_NATIVE_BYTE = 0x1f << B2R2_COLOR_FORMAT_SHIFT,
+};
+
+/* B2R2_CIC register values */
+enum b2r2_cic {
+	B2R2_CIC_COLOR_FILL = BIT(1),		/* 0x00000002 */
+	B2R2_CIC_SOURCE_1 = BIT(2),		/* 0x00000004 */
+	B2R2_CIC_SOURCE_2 = BIT(3),		/* 0x00000008 */
+	B2R2_CIC_SOURCE_3 = BIT(4),		/* 0x00000010 */
+	B2R2_CIC_CLIP_WINDOW = BIT(5),		/* 0x00000020 */
+	B2R2_CIC_CLUT = BIT(6),			/* 0x00000040 */
+	B2R2_CIC_FILTER_CONTROL = BIT(7),	/* 0x00000080 */
+	B2R2_CIC_RESIZE_CHROMA = BIT(8),	/* 0x00000100 */
+	B2R2_CIC_RESIZE_LUMA = BIT(9),		/* 0x00000200 */
+	B2R2_CIC_FLICKER_COEFF = BIT(10),	/* 0x00000400 */
+	B2R2_CIC_COLOR_KEY = BIT(11),		/* 0x00000800 */
+	B2R2_CIC_XYL = BIT(12),			/* 0x00001000 */
+	B2R2_CIC_SAU = BIT(13),			/* 0x00002000 */
+	B2R2_CIC_IVMX = BIT(14),		/* 0x00004000 */
+	B2R2_CIC_OVMX = BIT(15),		/* 0x00008000 */
+	B2R2_CIC_PACEDOT = BIT(16),		/* 0x00010000 */
+	B2R2_CIC_VC1 = BIT(17)			/* 0x00020000 */
+};
+
+/* B2R2_INS register values */
+#define B2R2_INS_SOURCE_1_SHIFT 0
+#define B2R2_INS_SOURCE_2_SHIFT 3
+#define B2R2_INS_SOURCE_3_SHIFT 5
+#define B2R2_INS_IVMX_SHIFT 6
+#define B2R2_INS_CLUTOP_SHIFT 7
+#define B2R2_INS_RESCALE2D_SHIFT 8
+#define B2R2_INS_FLICK_FILT_SHIFT 9
+#define B2R2_INS_RECT_CLIP_SHIFT 10
+#define B2R2_INS_CKEY_SHIFT 11
+#define B2R2_INS_OVMX_SHIFT 12
+#define B2R2_INS_DEI_SHIFT 13
+#define B2R2_INS_PLANE_MASK_SHIFT 14
+#define B2R2_INS_XYL_SHIFT 15
+#define B2R2_INS_DOT_SHIFT 16
+#define B2R2_INS_VC1R_SHIFT 17
+#define B2R2_INS_ROTATION_SHIFT 18
+#define B2R2_INS_PACE_DOWN_SHIFT 30
+#define B2R2_INS_BLITCOMPIRQ_SHIFT 31
+enum b2r2_ins {
+ /* Source 1 config */
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_COPY = 0x4 << B2R2_INS_SOURCE_1_SHIFT,
+ B2R2_INS_SOURCE_1_DIRECT_FILL = 0x7 << B2R2_INS_SOURCE_1_SHIFT,
+
+ /* Source 2 config */
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_2_SHIFT,
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_2_SHIFT,
+
+ /* Source 3 config */
+ B2R2_INS_SOURCE_3_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_3_SHIFT,
+
+ /* Other configs */
+ B2R2_INS_IVMX_ENABLED = 0x1 << B2R2_INS_IVMX_SHIFT,
+ B2R2_INS_CLUTOP_ENABLED = 0x1 << B2R2_INS_CLUTOP_SHIFT,
+ B2R2_INS_RESCALE2D_ENABLED = 0x1 << B2R2_INS_RESCALE2D_SHIFT,
+ B2R2_INS_FLICK_FILT_ENABLED = 0x1 << B2R2_INS_FLICK_FILT_SHIFT,
+ B2R2_INS_RECT_CLIP_ENABLED = 0x1 << B2R2_INS_RECT_CLIP_SHIFT,
+ B2R2_INS_CKEY_ENABLED = 0x1 << B2R2_INS_CKEY_SHIFT,
+ B2R2_INS_OVMX_ENABLED = 0x1 << B2R2_INS_OVMX_SHIFT,
+ B2R2_INS_DEI_ENABLED = 0x1 << B2R2_INS_DEI_SHIFT,
+ B2R2_INS_PLANE_MASK_ENABLED = 0x1 << B2R2_INS_PLANE_MASK_SHIFT,
+ B2R2_INS_XYL_ENABLED = 0x1 << B2R2_INS_XYL_SHIFT,
+ B2R2_INS_DOT_ENABLED = 0x1 << B2R2_INS_DOT_SHIFT,
+ B2R2_INS_VC1R_ENABLED = 0x1 << B2R2_INS_VC1R_SHIFT,
+ B2R2_INS_ROTATION_ENABLED = 0x1 << B2R2_INS_ROTATION_SHIFT,
+ B2R2_INS_PACE_DOWN_ENABLED = 0x1 << B2R2_INS_PACE_DOWN_SHIFT,
+	B2R2_INS_BLITCOMPIRQ_ENABLED = 0x1 << B2R2_INS_BLITCOMPIRQ_SHIFT,
+};
+
+/* B2R2_ACK register values */
+#define B2R2_ACK_MODE_SHIFT 0
+#define B2R2_ACK_SWAP_FG_BG_SHIFT 4
+#define B2R2_ACK_GALPHA_ROPID_SHIFT 8
+#define B2R2_ACK_CKEY_BLUE_SHIFT 16
+#define B2R2_ACK_CKEY_GREEN_SHIFT 18
+#define B2R2_ACK_CKEY_RED_SHIFT 20
+#define B2R2_ACK_CKEY_SEL_SHIFT 22
+enum b2r2_ack {
+ /* ALU operation modes */
+ B2R2_ACK_MODE_LOGICAL_OPERATION = 0x1 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_NOT_PREMULT = 0x2 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BLEND_PREMULT = 0x3 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_FIRST_PASS = 0x4 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_BLEND = 0x5 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_BYPASS_S2_S3 = 0x7 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_LOGICAL_SECOND_PASS = 0x8 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_LOGICAL = 0x9 << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_NOT_PREMULT =
+ 0xa << B2R2_ACK_MODE_SHIFT,
+ B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_PREMULT = 0xb << B2R2_ACK_MODE_SHIFT,
+
+ /* ALU channel selection */
+ B2R2_ACK_SWAP_FG_BG = 0x1 << B2R2_ACK_SWAP_FG_BG_SHIFT,
+
+ /* Global alpha and ROP IDs */
+ B2R2_ACK_ROP_CLEAR = 0x0 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND = 0x1 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_REV = 0x2 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY = 0x3 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_AND_INV = 0x4 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOOP = 0x5 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_XOR = 0x6 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR = 0x7 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NOR = 0x8 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_EQUIV = 0x9 << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_INVERT = 0xa << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_REV = 0xb << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_COPY_INV = 0xc << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_OR_INV = 0xd << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_NAND = 0xe << B2R2_ACK_GALPHA_ROPID_SHIFT,
+ B2R2_ACK_ROP_SET = 0xf << B2R2_ACK_GALPHA_ROPID_SHIFT,
+
+ /* Color key configuration bits */
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_BLUE_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_RED_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_GREEN_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_RED_SHIFT,
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_RED_SHIFT,
+
+ /* Color key input selection */
+ B2R2_ACK_CKEY_SEL_DEST = 0x0 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_BEFORE_CLUT = 0x1 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT = 0x2 << B2R2_ACK_CKEY_SEL_SHIFT,
+ B2R2_ACK_CKEY_SEL_BLANKING_S2_ALPHA = 0x3 << B2R2_ACK_CKEY_SEL_SHIFT,
+};
+
+/* Common <S/T>TY defines */
+#define B2R2_TY_BITMAP_PITCH_SHIFT 0
+#define B2R2_TY_COLOR_FORM_SHIFT 16
+#define B2R2_TY_ALPHA_RANGE_SHIFT 21
+#define B2R2_TY_MB_ACCESS_MODE_SHIFT 23
+#define B2R2_TY_HSO_SHIFT 24
+#define B2R2_TY_VSO_SHIFT 25
+#define B2R2_TY_SUBBYTE_SHIFT 28
+#define B2R2_TY_ENDIAN_SHIFT 30
+#define B2R2_TY_SECURE_SHIFT 31
+
+/* Dummy enum for generalization of <S/T>TY registers */
+enum b2r2_ty {
+ /* Alpha range */
+ B2R2_TY_ALPHA_RANGE_128 = 0x0 << B2R2_TY_ALPHA_RANGE_SHIFT,
+ B2R2_TY_ALPHA_RANGE_255 = 0x1 << B2R2_TY_ALPHA_RANGE_SHIFT,
+
+ /* Access mode in macro-block organized frame buffers */
+ B2R2_TY_MB_ACCESS_MODE_FRAME = 0x0 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+ B2R2_TY_MB_ACCESS_MODE_FIELD = 0x1 << B2R2_TY_MB_ACCESS_MODE_SHIFT,
+
+ /* Horizontal scan order */
+ B2R2_TY_HSO_LEFT_TO_RIGHT = 0x0 << B2R2_TY_HSO_SHIFT,
+ B2R2_TY_HSO_RIGHT_TO_LEFT = 0x1 << B2R2_TY_HSO_SHIFT,
+
+ /* Vertical scan order */
+ B2R2_TY_VSO_TOP_TO_BOTTOM = 0x0 << B2R2_TY_VSO_SHIFT,
+ B2R2_TY_VSO_BOTTOM_TO_TOP = 0x1 << B2R2_TY_VSO_SHIFT,
+
+ /* Pixel ordering for sub-byte formats (position of right-most pixel) */
+ B2R2_TY_SUBBYTE_MSB = 0x0 << B2R2_TY_SUBBYTE_SHIFT,
+ B2R2_TY_SUBBYTE_LSB = 0x1 << B2R2_TY_SUBBYTE_SHIFT,
+
+	/* Bitmap endianness */
+ B2R2_TY_ENDIAN_BIG_NOT_LITTLE = 0x1 << B2R2_TY_ENDIAN_SHIFT,
+
+ /* Secureness of the target memory region */
+ B2R2_TY_SECURE_UNSECURE = 0x0 << B2R2_TY_SECURE_SHIFT,
+ B2R2_TY_SECURE_SECURE = 0x1 << B2R2_TY_SECURE_SHIFT,
+
+ /* Dummy to make sure the data type is large enough */
+ B2R2_TY_DUMMY = 0xffffffff,
+};
+
+/* B2R2_TTY register values */
+#define B2R2_TTY_CB_NOT_CR_SHIFT 22
+#define B2R2_TTY_RGB_ROUND_SHIFT 26
+#define B2R2_TTY_CHROMA_NOT_LUMA_SHIFT 27
+enum b2r2_tty {
+
+ /* Chroma component selection */
+ B2R2_TTY_CB_NOT_CR = 0x1 << B2R2_TTY_CB_NOT_CR_SHIFT,
+
+ /* RGB rounding mode */
+ B2R2_TTY_RGB_ROUND_NORMAL = 0x0 << B2R2_TTY_RGB_ROUND_SHIFT,
+ B2R2_TTY_RGB_ROUND_DITHER = 0x1 << B2R2_TTY_RGB_ROUND_SHIFT,
+
+	/* Component selection for split frame buffer formats */
+ B2R2_TTY_CHROMA_NOT_LUMA = 0x1 << B2R2_TTY_CHROMA_NOT_LUMA_SHIFT,
+};
+
+/* B2R2_S1TY register values */
+#define B2R2_S1TY_A1_SUBST_SHIFT 22
+#define B2R2_S1TY_ROTATION_SHIFT 27
+#define B2R2_S1TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s1ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S1TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S1TY_A1_SUBST_SHIFT,
+
+ /* Input rectangle rotation (NOT YET IMPLEMENTED) */
+ B2R2_S1TY_ENABLE_ROTATION = 0x1 << B2R2_S1TY_ROTATION_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S1TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+ B2R2_S1TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S1TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S2TY register values */
+#define B2R2_S2TY_A1_SUBST_SHIFT 22
+#define B2R2_S2TY_CHROMA_LEFT_SHIFT 26
+#define B2R2_S2TY_RGB_EXPANSION_SHIFT 29
+enum b2r2_s2ty {
+
+ /* Alpha bit substitution mode for ARGB1555 */
+ B2R2_S2TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S2TY_A1_SUBST_SHIFT,
+
+ /* Chroma left extension */
+ B2R2_S2TY_CHROMA_LEFT_EXT_FOLLOWING_PIXEL = 0x0
+ << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+ B2R2_S2TY_CHROMA_LEFT_EXT_AVERAGE = 0x1 << B2R2_S2TY_CHROMA_LEFT_SHIFT,
+
+ /* RGB expansion mode */
+ B2R2_S2TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+ B2R2_S2TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S2TY_RGB_EXPANSION_SHIFT,
+};
+
+/* B2R2_S3TY register values */
+#define B2R2_S3TY_BLANK_ACC_SHIFT 26
+enum b2r2_s3ty {
+ /* Enables "blank" access on this source (nothing will be fetched from
+ memory) */
+ B2R2_S3TY_ENABLE_BLANK_ACCESS = 0x1 << B2R2_S3TY_BLANK_ACC_SHIFT,
+};
+
+/* B2R2_<S or T>XY register values */
+#define B2R2_XY_X_SHIFT 0
+#define B2R2_XY_Y_SHIFT 16
+
+/* B2R2_<S or T>SZ register values */
+#define B2R2_SZ_WIDTH_SHIFT 0
+#define B2R2_SZ_HEIGHT_SHIFT 16
+
+/* Clip window offset (top left coordinates) */
+#define B2R2_CWO_X_SHIFT 0
+#define B2R2_CWO_Y_SHIFT 16
+
+/* Clip window stop (bottom right coordinates) */
+#define B2R2_CWS_X_SHIFT 0
+#define B2R2_CWS_Y_SHIFT 16
+
+/* Color look-up table */
+enum b2r2_cco {
+ B2R2_CCO_CLUT_COLOR_CORRECTION = (1 << 16),
+ B2R2_CCO_CLUT_UPDATE = (1 << 18),
+ B2R2_CCO_CLUT_ON_S1 = (1 << 15)
+};
+
+/* Filter control (2D resize control) */
+enum b2r2_fctl {
+ /* Horizontal 2D filter mode */
+ B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(0),
+ B2R2_FCTL_HF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(1),
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER = BIT(2),
+
+ /* Vertical 2D filter mode */
+ B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(4),
+ B2R2_FCTL_VF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(5),
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER = BIT(6),
+
+ /* Alpha borders */
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_RIGHT = BIT(12),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_LEFT = BIT(13),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_BOTTOM = BIT(14),
+ B2R2_FCTL_ENABLE_ALPHA_BORDER_TOP = BIT(15),
+
+ /* Luma path horizontal 2D filter mode */
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER = BIT(24),
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER = BIT(25),
+
+ /* Luma path vertical 2D filter mode */
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER = BIT(28),
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER = BIT(29),
+};
+
+/* Resize scaling factor */
+#define B2R2_RSF_HSRC_INC_SHIFT 0
+#define B2R2_RSF_VSRC_INC_SHIFT 16
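+/*
+ * The increments are in 6.10 fixed point; e.g. a 2x horizontal
+ * downscale corresponds to a source increment of 2.0, i.e. 0x0800
+ * in the HSRC_INC field (illustrative).
+ */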
+
+/* Resizer initialization */
+#define B2R2_RZI_HSRC_INIT_SHIFT 0
+#define B2R2_RZI_HNB_REPEAT_SHIFT 12
+#define B2R2_RZI_VSRC_INIT_SHIFT 16
+#define B2R2_RZI_VNB_REPEAT_SHIFT 28
+
+/* Default values for the resizer */
+#define B2R2_RZI_DEFAULT_HNB_REPEAT (3 << B2R2_RZI_HNB_REPEAT_SHIFT)
+#define B2R2_RZI_DEFAULT_VNB_REPEAT (3 << B2R2_RZI_VNB_REPEAT_SHIFT)
+
+
+/* Bus plug configuration registers */
+enum b2r2_plug_opcode_size {
+ B2R2_PLUG_OPCODE_SIZE_8 = 0x3,
+ B2R2_PLUG_OPCODE_SIZE_16 = 0x4,
+ B2R2_PLUG_OPCODE_SIZE_32 = 0x5,
+ B2R2_PLUG_OPCODE_SIZE_64 = 0x6,
+};
+
+enum b2r2_plug_chunk_size {
+ B2R2_PLUG_CHUNK_SIZE_1 = 0x0,
+ B2R2_PLUG_CHUNK_SIZE_2 = 0x1,
+ B2R2_PLUG_CHUNK_SIZE_4 = 0x2,
+ B2R2_PLUG_CHUNK_SIZE_8 = 0x3,
+ B2R2_PLUG_CHUNK_SIZE_16 = 0x4,
+ B2R2_PLUG_CHUNK_SIZE_32 = 0x5,
+ B2R2_PLUG_CHUNK_SIZE_64 = 0x6,
+ B2R2_PLUG_CHUNK_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_message_size {
+ B2R2_PLUG_MESSAGE_SIZE_1 = 0x0,
+ B2R2_PLUG_MESSAGE_SIZE_2 = 0x1,
+ B2R2_PLUG_MESSAGE_SIZE_4 = 0x2,
+ B2R2_PLUG_MESSAGE_SIZE_8 = 0x3,
+ B2R2_PLUG_MESSAGE_SIZE_16 = 0x4,
+ B2R2_PLUG_MESSAGE_SIZE_32 = 0x5,
+ B2R2_PLUG_MESSAGE_SIZE_64 = 0x6,
+ B2R2_PLUG_MESSAGE_SIZE_128 = 0x7,
+};
+
+enum b2r2_plug_page_size {
+ B2R2_PLUG_PAGE_SIZE_64 = 0x0,
+ B2R2_PLUG_PAGE_SIZE_128 = 0x1,
+ B2R2_PLUG_PAGE_SIZE_256 = 0x2,
+};
+
+/* Default opcode size */
+#if defined(CONFIG_B2R2_OPSIZE_8)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_8
+#elif defined(CONFIG_B2R2_OPSIZE_16)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_16
+#elif defined(CONFIG_B2R2_OPSIZE_32)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_32
+#elif defined(CONFIG_B2R2_OPSIZE_64)
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_64
+#else
+# define B2R2_PLUG_OPCODE_SIZE_DEFAULT 0
+#endif
+
+/* Default chunk size */
+#if defined(CONFIG_B2R2_CHSIZE_1)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_1
+#elif defined(CONFIG_B2R2_CHSIZE_2)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_2
+#elif defined(CONFIG_B2R2_CHSIZE_4)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_4
+#elif defined(CONFIG_B2R2_CHSIZE_8)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_8
+#elif defined(CONFIG_B2R2_CHSIZE_16)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_16
+#elif defined(CONFIG_B2R2_CHSIZE_32)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_32
+#elif defined(CONFIG_B2R2_CHSIZE_64)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_64
+#elif defined(CONFIG_B2R2_CHSIZE_128)
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_128
+#else
+# define B2R2_PLUG_CHUNK_SIZE_DEFAULT 0
+#endif
+
+/* Default message size */
+#if defined(CONFIG_B2R2_MGSIZE_1)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_1
+#elif defined(CONFIG_B2R2_MGSIZE_2)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_2
+#elif defined(CONFIG_B2R2_MGSIZE_4)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_4
+#elif defined(CONFIG_B2R2_MGSIZE_8)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_8
+#elif defined(CONFIG_B2R2_MGSIZE_16)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_16
+#elif defined(CONFIG_B2R2_MGSIZE_32)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_32
+#elif defined(CONFIG_B2R2_MGSIZE_64)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_64
+#elif defined(CONFIG_B2R2_MGSIZE_128)
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_128
+#else
+# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT 0
+#endif
+
+/* Default page size */
+#if defined(CONFIG_B2R2_PGSIZE_64)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_64
+#elif defined(CONFIG_B2R2_PGSIZE_128)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_128
+#elif defined(CONFIG_B2R2_PGSIZE_256)
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_256
+#else
+# define B2R2_PLUG_PAGE_SIZE_DEFAULT 0
+#endif
+
+/* VMX register values for RGB to YUV color conversion */
+/* Magic numbers from 27.11 in DB8500_DesignSpecification_v2.5.pdf */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_601_VIDEO 0x107e4beb
+#define B2R2_VMX1_RGB_TO_YUV_601_VIDEO 0x0982581d
+#define B2R2_VMX2_RGB_TO_YUV_601_VIDEO 0xfa9ea483
+#define B2R2_VMX3_RGB_TO_YUV_601_VIDEO 0x08000080
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_601_GFX 0x0e1e8bee
+#define B2R2_VMX1_RGB_TO_YUV_601_GFX 0x08420419
+#define B2R2_VMX2_RGB_TO_YUV_601_GFX 0xfb5ed471
+#define B2R2_VMX3_RGB_TO_YUV_601_GFX 0x08004080
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_709_VIDEO 0x107e27f4
+#define B2R2_VMX1_RGB_TO_YUV_709_VIDEO 0x06e2dc13
+#define B2R2_VMX2_RGB_TO_YUV_709_VIDEO 0xfc5e6c83
+#define B2R2_VMX3_RGB_TO_YUV_709_VIDEO 0x08000080
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_RGB_TO_YUV_709_GFX 0x0e3e6bf5
+#define B2R2_VMX1_RGB_TO_YUV_709_GFX 0x05e27410
+#define B2R2_VMX2_RGB_TO_YUV_709_GFX 0xfcdea471
+#define B2R2_VMX3_RGB_TO_YUV_709_GFX 0x08004080
+
+/* VMX register values for YUV to RGB color conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_601_VIDEO 0x2c440000
+#define B2R2_VMX1_YUV_TO_RGB_601_VIDEO 0xe9a403aa
+#define B2R2_VMX2_YUV_TO_RGB_601_VIDEO 0x0004013f
+#define B2R2_VMX3_YUV_TO_RGB_601_VIDEO 0x34f21322
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_601_GFX 0x3324a800
+#define B2R2_VMX1_YUV_TO_RGB_601_GFX 0xe604ab9c
+#define B2R2_VMX2_YUV_TO_RGB_601_GFX 0x0004a957
+#define B2R2_VMX3_YUV_TO_RGB_601_GFX 0x32121eeb
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_709_VIDEO 0x31440000
+#define B2R2_VMX1_YUV_TO_RGB_709_VIDEO 0xf16403d1
+#define B2R2_VMX2_YUV_TO_RGB_709_VIDEO 0x00040145
+#define B2R2_VMX3_YUV_TO_RGB_709_VIDEO 0x33b14b18
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_RGB_709_GFX 0x3964a800
+#define B2R2_VMX1_YUV_TO_RGB_709_GFX 0xef04abc9
+#define B2R2_VMX2_YUV_TO_RGB_709_GFX 0x0004a95f
+#define B2R2_VMX3_YUV_TO_RGB_709_GFX 0x307132df
+
+/* VMX register values for RGB to BGR conversion */
+#define B2R2_VMX0_RGB_TO_BGR 0x00000100
+#define B2R2_VMX1_RGB_TO_BGR 0x00040000
+#define B2R2_VMX2_RGB_TO_BGR 0x20000000
+#define B2R2_VMX3_RGB_TO_BGR 0x00000000
+
+/* VMX register values for BGR to YUV color conversion */
+/* Note: All BGR -> YUV values are calculated by multiplying
+ * the RGB -> YUV matrices [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |0 1 0|
+ * |1 0 0|
+ * Essentially swapping first and third columns in
+ * the matrices (VMX0, VMX1 and VMX2 values).
+ * The offset vector VMX3 remains untouched.
+ * Put another way, the value of bits 0 through 9
+ * is swapped with the value of
+ * bits 20 through 31 in VMX0, VMX1 and VMX2,
+ * taking into consideration the compression
+ * that is used on bits 0 through 9. Bit 0 being LSB.
+ */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_601_VIDEO 0xfd7e4883
+#define B2R2_VMX1_BGR_TO_YUV_601_VIDEO 0x03a2584c
+#define B2R2_VMX2_BGR_TO_YUV_601_VIDEO 0x107ea7d4
+#define B2R2_VMX3_BGR_TO_YUV_601_VIDEO 0x08000080
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_601_GFX 0xfdde8870
+#define B2R2_VMX1_BGR_TO_YUV_601_GFX 0x03220442
+#define B2R2_VMX2_BGR_TO_YUV_601_GFX 0x0e3ed7da
+#define B2R2_VMX3_BGR_TO_YUV_601_GFX 0x08004080
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_709_VIDEO 0xfe9e2483
+#define B2R2_VMX1_BGR_TO_YUV_709_VIDEO 0x0262dc37
+#define B2R2_VMX2_BGR_TO_YUV_709_VIDEO 0x107e6fe2
+#define B2R2_VMX3_BGR_TO_YUV_709_VIDEO 0x08000080
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_BGR_TO_YUV_709_GFX 0xfebe6871
+#define B2R2_VMX1_BGR_TO_YUV_709_GFX 0x0202742f
+#define B2R2_VMX2_BGR_TO_YUV_709_GFX 0x0e3ea7e6
+#define B2R2_VMX3_BGR_TO_YUV_709_GFX 0x08004080
+
+
+/* VMX register values for YUV to BGR conversion */
+/* Note: All YUV -> BGR values are constructed
+ * from the YUV -> RGB ones, by swapping
+ * first and third rows in the matrix
+ * (VMX0 and VMX2 values). Further, the first and
+ * third values in the offset vector need to be
+ * swapped as well, i.e. bits 0 through 9 are swapped
+ * with bits 20 through 29 in the VMX3 value (bit 0
+ * being the LSB).
+ */
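+
+/*
+ * For reference, a sketch of the VMX3 offset swap described above,
+ * assuming three packed 10-bit offsets of which the outer two trade
+ * places (the helper name is illustrative, it is not used by the
+ * driver):
+ *
+ *	static inline u32 b2r2_vmx3_swap_offsets(u32 vmx3)
+ *	{
+ *		return ((vmx3 & 0x3ff) << 20) |
+ *		       (vmx3 & (0x3ff << 10)) |
+ *		       ((vmx3 >> 20) & 0x3ff);
+ *	}
+ *
+ * For example, it maps B2R2_VMX3_YUV_TO_RGB_601_VIDEO (0x34f21322)
+ * to B2R2_VMX3_YUV_TO_BGR_601_VIDEO (0x3222134f).
+ */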
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_601_VIDEO 0x0004013f
+#define B2R2_VMX1_YUV_TO_BGR_601_VIDEO 0xe9a403aa
+#define B2R2_VMX2_YUV_TO_BGR_601_VIDEO 0x2c440000
+#define B2R2_VMX3_YUV_TO_BGR_601_VIDEO 0x3222134f
+
+/* 601 Gfx Matrix (full range conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_601_GFX 0x0004a957
+#define B2R2_VMX1_YUV_TO_BGR_601_GFX 0xe604ab9c
+#define B2R2_VMX2_YUV_TO_BGR_601_GFX 0x3324a800
+#define B2R2_VMX3_YUV_TO_BGR_601_GFX 0x2eb21f21
+
+/* 709 Video Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_709_VIDEO 0x00040145
+#define B2R2_VMX1_YUV_TO_BGR_709_VIDEO 0xf16403d1
+#define B2R2_VMX2_YUV_TO_BGR_709_VIDEO 0x31440000
+#define B2R2_VMX3_YUV_TO_BGR_709_VIDEO 0x31814b3b
+
+/* 709 Gfx Matrix (standard 709 conversion) */
+#define B2R2_VMX0_YUV_TO_BGR_709_GFX 0x0004a95f
+#define B2R2_VMX1_YUV_TO_BGR_709_GFX 0xef04abc9
+#define B2R2_VMX2_YUV_TO_BGR_709_GFX 0x3964a800
+#define B2R2_VMX3_YUV_TO_BGR_709_GFX 0x2df13307
+
+
+/* VMX register values for YVU to RGB conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YVU_TO_RGB_601_VIDEO 0x00040120
+#define B2R2_VMX1_YVU_TO_RGB_601_VIDEO 0xf544034d
+#define B2R2_VMX2_YVU_TO_RGB_601_VIDEO 0x37840000
+#define B2R2_VMX3_YVU_TO_RGB_601_VIDEO 0x34f21322
+
+/* VMX register values for RGB to YVU conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_RGB_TO_YVU_601_VIDEO 0xfa9ea483
+#define B2R2_VMX1_RGB_TO_YVU_601_VIDEO 0x0982581d
+#define B2R2_VMX2_RGB_TO_YVU_601_VIDEO 0x107e4beb
+#define B2R2_VMX3_RGB_TO_YVU_601_VIDEO 0x08000080
+
+/* VMX register values for YVU to BGR conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_YVU_TO_BGR_601_VIDEO 0x37840000
+#define B2R2_VMX1_YVU_TO_BGR_601_VIDEO 0xf544034d
+#define B2R2_VMX2_YVU_TO_BGR_601_VIDEO 0x00040120
+#define B2R2_VMX3_YVU_TO_BGR_601_VIDEO 0x3222134f
+
+/* VMX register values for BGR to YVU conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+#define B2R2_VMX0_BGR_TO_YVU_601_VIDEO 0x107ea7d4
+#define B2R2_VMX1_BGR_TO_YVU_601_VIDEO 0x03a2584c
+#define B2R2_VMX2_BGR_TO_YVU_601_VIDEO 0xfd7e4883
+#define B2R2_VMX3_BGR_TO_YVU_601_VIDEO 0x08000080
+
+/* VMX register values for YVU to YUV conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+/* Internally, the components are in fact stored
+ * with luma in the middle, i.e. UYV, which is why
+ * the values are just like for RGB->BGR conversion.
+ */
+#define B2R2_VMX0_YVU_TO_YUV_601_VIDEO 0x00000100
+#define B2R2_VMX1_YVU_TO_YUV_601_VIDEO 0x00040000
+#define B2R2_VMX2_YVU_TO_YUV_601_VIDEO 0x20000000
+#define B2R2_VMX3_YVU_TO_YUV_601_VIDEO 0x00000000
+
+/* VMX register values for RGB to BLT_YUV888 conversion */
+
+/* 601 Video Matrix (standard 601 conversion) */
+/*
+ * BLT_YUV888 has color components laid out in memory as V, U, Y, (Alpha)
+ * with V at the first byte (due to little endian addressing).
+ * B2R2 expects them to be ordered as U, Y, V, (A)
+ * with U at the first byte.
+ * Note: RGB -> BLT_YUV888 values are calculated by multiplying
+ * the RGB -> YUV matrix [A], with [S] to form [S]x[A] where
+ * |0 1 0|
+ * S = |0 0 1|
+ * |1 0 0|
+ * Essentially changing the order of rows in the original
+ * matrix [A].
+ * row1 -> row3
+ * row2 -> row1
+ * row3 -> row2
+ * Values in the offset vector are swapped in the same manner.
+ */
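+/*
+ * The row permutation can be checked directly against the tables
+ * above: VMX0 below equals the RGB -> YUV 601 video VMX1 word,
+ * VMX1 equals its VMX2 word and VMX2 equals its VMX0 word, while
+ * the three 10-bit offsets in VMX3 are rotated the same way.
+ */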
+#define B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO 0x0982581d
+#define B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO 0xfa9ea483
+#define B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO 0x107e4beb
+#define B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO 0x00020080
+
+/* VMX register values for BLT_YUV888 to RGB conversion */
+
+/*
+ * Note: BLT_YUV888 -> RGB values are calculated by multiplying
+ * the YUV -> RGB matrix [A], with [S] to form [A]x[S] where
+ * |0 0 1|
+ * S = |1 0 0|
+ * |0 1 0|
+ * Essentially changing the order of columns in the original
+ * matrix [A].
+ * col1 -> col3
+ * col2 -> col1
+ * col3 -> col2
+ * Values in the offset vector remain unchanged.
+ */
+#define B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO 0x20000121
+#define B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO 0x201ea74c
+#define B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO 0x2006f000
+#define B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO 0x34f21322
+
+/* VMX register values for YUV to BLT_YUV888 conversion */
+#define B2R2_VMX0_YUV_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YUV_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX2_YUV_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX3_YUV_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YUV conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YUV 0x00000100
+#define B2R2_VMX1_BLT_YUV888_TO_YUV 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YUV 0x00040000
+#define B2R2_VMX3_BLT_YUV888_TO_YUV 0x00000000
+
+/* VMX register values for YVU to BLT_YUV888 conversion */
+#define B2R2_VMX0_YVU_TO_BLT_YUV888 0x00040000
+#define B2R2_VMX1_YVU_TO_BLT_YUV888 0x20000000
+#define B2R2_VMX2_YVU_TO_BLT_YUV888 0x00000100
+#define B2R2_VMX3_YVU_TO_BLT_YUV888 0x00000000
+
+/* VMX register values for BLT_YUV888 to YVU conversion */
+#define B2R2_VMX0_BLT_YUV888_TO_YVU 0x00040000
+#define B2R2_VMX1_BLT_YUV888_TO_YVU 0x20000000
+#define B2R2_VMX2_BLT_YUV888_TO_YVU 0x00000100
+#define B2R2_VMX3_BLT_YUV888_TO_YVU 0x00000000
+
+#endif /* B2R2_HW_H__ */
diff --git a/drivers/video/b2r2/b2r2_input_validation.c b/drivers/video/b2r2/b2r2_input_validation.c
new file mode 100644
index 00000000000..ac8b5728847
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include "b2r2_internal.h"
+#include "b2r2_input_validation.h"
+#include "b2r2_debug.h"
+#include "b2r2_utils.h"
+
+#include <video/b2r2_blt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt);
+static bool is_valid_bg_format(enum b2r2_blt_fmt fmt);
+
+static bool is_valid_pitch_for_fmt(struct b2r2_control *cont,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt);
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt);
+
+static bool validate_img(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+static bool validate_rect(struct b2r2_control *cont,
+ struct b2r2_blt_rect *rect);
+
+
+static bool is_valid_format(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool is_valid_bg_format(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+
+static bool is_valid_pitch_for_fmt(struct b2r2_control *cont,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt)
+{
+ s32 complete_width;
+ u32 pitch_derived_from_width;
+
+ complete_width = width_2_complete_width(width, fmt);
+
+ pitch_derived_from_width = b2r2_calc_pitch_from_width(cont,
+ complete_width, fmt);
+
+ if (pitch < pitch_derived_from_width)
+ return false;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ if (!b2r2_is_aligned(pitch, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (!b2r2_is_aligned(pitch, 4))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
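+/*
+ * Example: for B2R2_BLT_FMT_16_BIT_RGB565 at width 100 the pitch
+ * derived from the width should be 200 bytes (assuming that
+ * b2r2_calc_pitch_from_width() returns width times bytes per pixel
+ * for packed formats), so any 2 byte aligned pitch of at least 200
+ * passes the check above.
+ */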
+
+static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ if (!b2r2_is_aligned(width, 4))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_1_BIT_A1:
+ if (!b2r2_is_aligned(width, 8))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return b2r2_align_up(width, 2);
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return b2r2_align_up(width, 16);
+
+ default:
+ return width;
+ }
+}
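+
+/*
+ * Example: a 99 pixel wide YUV420 packed planar image gets a complete
+ * width of 100, while the 16 pixel aligned macro block formats round
+ * the same width up to 112.
+ */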
+
+static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(width, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(width, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ if (!b2r2_is_aligned(height, 2))
+ return false;
+
+ break;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ if (!b2r2_is_aligned(height, 16))
+ return false;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool validate_img(struct b2r2_control *cont,
+ struct b2r2_blt_img *img)
+{
+ /*
+	 * So that we can always do width * height * bpp without
+	 * overflowing a 32 bit signed integer. isqrt(s32_max / max_bpp)
+	 * was used to calculate the value: with at most 32 bits per
+	 * pixel, isqrt(0x7fffffff / 32) = 8191.
+ */
+ static const s32 max_img_width_height = 8191;
+
+ s32 img_size;
+
+ if (!is_valid_format(img->fmt)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_valid_format(img->fmt)\n");
+ return false;
+ }
+
+ if (img->width < 0 || img->width > max_img_width_height ||
+ img->height < 0 || img->height > max_img_width_height) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "img->width < 0 || "
+ "img->width > max_img_width_height || "
+ "img->height < 0 || "
+ "img->height > max_img_width_height\n");
+ return false;
+ }
+
+ if (b2r2_is_mb_fmt(img->fmt)) {
+ if (!is_complete_width_for_fmt(img->width, img->fmt)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt)\n");
+ return false;
+ }
+ } else {
+ if (0 == img->pitch &&
+ (!is_aligned_width_for_fmt(img->width, img->fmt) ||
+ !is_complete_width_for_fmt(img->width, img->fmt))) {
+ b2r2_log_info(cont->dev,
+ "Validation Error: "
+ "0 == img->pitch && "
+ "(!is_aligned_width_for_fmt(img->width,"
+ " img->fmt) || "
+ "!is_complete_width_for_fmt(img->width,"
+ " img->fmt))\n");
+ return false;
+ }
+
+ if (img->pitch != 0 &&
+ !is_valid_pitch_for_fmt(cont, img->pitch, img->width,
+ img->fmt)) {
+ b2r2_log_info(cont->dev,
+ "Validation Error: "
+ "img->pitch != 0 && "
+ "!is_valid_pitch_for_fmt(cont, "
+ "img->pitch, img->width, img->fmt)\n");
+ return false;
+ }
+ }
+
+	if (!is_valid_height_for_fmt(img->height, img->fmt)) {
+		b2r2_log_info(cont->dev, "Validation Error: "
+			"!is_valid_height_for_fmt(img->height, img->fmt)\n");
+ return false;
+ }
+
+ img_size = b2r2_get_img_size(cont, img);
+
+ /*
+	 * To keep the entire image, including its buffer offset, inside
+	 * the s32 range.
+ */
+ if ((B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type ||
+ B2R2_BLT_PTR_FD_OFFSET == img->buf.type) &&
+ img->buf.offset > (u32)b2r2_s32_max - (u32)img_size) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "(B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == "
+ "img->buf.type || B2R2_BLT_PTR_FD_OFFSET == "
+ "img->buf.type) && img->buf.offset > "
+			"(u32)b2r2_s32_max - (u32)img_size\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_rect(struct b2r2_control *cont,
+ struct b2r2_blt_rect *rect)
+{
+ if (rect->width < 0 || rect->height < 0) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "rect->width < 0 || rect->height < 0\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool b2r2_validate_user_req(struct b2r2_control *cont,
+ struct b2r2_blt_req *req)
+{
+ bool is_src_img_used;
+ bool is_bg_img_used;
+ bool is_src_mask_used;
+ bool is_dst_clip_rect_used;
+
+ if (req->size != sizeof(struct b2r2_blt_req)) {
+ b2r2_log_err(cont->dev, "Validation Error: "
+ "req->size != sizeof(struct b2r2_blt_req)\n");
+ return false;
+ }
+
+ is_src_img_used = !(req->flags & B2R2_BLT_FLAG_SOURCE_FILL ||
+ req->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW);
+ is_bg_img_used = (req->flags & B2R2_BLT_FLAG_BG_BLEND);
+ is_src_mask_used = req->flags & B2R2_BLT_FLAG_SOURCE_MASK;
+ is_dst_clip_rect_used = req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP;
+
+ if (is_src_img_used || is_src_mask_used) {
+ if (!validate_rect(cont, &req->src_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->src_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_rect(cont, &req->dst_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->dst_rect)\n");
+ return false;
+ }
+
+ if (is_bg_img_used) {
+ if (!validate_rect(cont, &req->bg_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->bg_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_dst_clip_rect_used) {
+ if (!validate_rect(cont, &req->dst_clip_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->dst_clip_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_img_used) {
+ struct b2r2_blt_rect src_img_bounding_rect;
+
+ if (!validate_img(cont, &req->src_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->src_img)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_img,
+ &src_img_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_img_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_img_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_bg_img_used) {
+ struct b2r2_blt_rect bg_img_bounding_rect;
+
+ if (!validate_img(cont, &req->bg_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->bg_img)\n");
+ return false;
+ }
+
+ if (!is_valid_bg_format(req->bg_img.fmt)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+				"!is_valid_bg_format(req->bg_img.fmt)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->bg_img,
+ &bg_img_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->bg_rect,
+ &bg_img_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->bg_rect, "
+ "&bg_img_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (is_src_mask_used) {
+ struct b2r2_blt_rect src_mask_bounding_rect;
+
+ if (!validate_img(cont, &req->src_mask)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->src_mask)\n");
+ return false;
+ }
+
+ b2r2_get_img_bounding_rect(&req->src_mask,
+ &src_mask_bounding_rect);
+ if (!b2r2_is_rect_inside_rect(&req->src_rect,
+ &src_mask_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_inside_rect(&req->src_rect, "
+ "&src_mask_bounding_rect)\n");
+ return false;
+ }
+ }
+
+ if (!validate_img(cont, &req->dst_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->dst_img)\n");
+ return false;
+ }
+
+ if (is_bg_img_used) {
+ if (!b2r2_is_rect_gte_rect(&req->bg_rect, &req->dst_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!b2r2_is_rect_gte_rect(&req->bg_rect, "
+ "&req->dst_rect)\n");
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/video/b2r2/b2r2_input_validation.h b/drivers/video/b2r2/b2r2_input_validation.h
new file mode 100644
index 00000000000..d3c6ae1b296
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_input_validation.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
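+/**
+ * b2r2_validate_user_req() - Validates a blt request received from
+ *                            user space
+ * @cont: The b2r2 control entity
+ * @req: The request to validate
+ *
+ * Returns true if the request is valid, false otherwise.
+ */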
+bool b2r2_validate_user_req(struct b2r2_control *cont,
+ struct b2r2_blt_req *req);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_internal.h b/drivers/video/b2r2/b2r2_internal.h
new file mode 100644
index 00000000000..7a46bbda19e
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_internal.h
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal definitions
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <video/b2r2_blt.h>
+
+#include "b2r2_global.h"
+#include "b2r2_hw.h"
+
+/**
+ * B2R2_MAX_NBR_DEVICES - The maximum number of B2R2s handled
+ */
+#define B2R2_MAX_NBR_DEVICES 1
+
+/* The maximum possible number of temporary buffers needed */
+#define MAX_TMP_BUFS_NEEDED 2
+
+/* Size of the color look-up table */
+#define CLUT_SIZE 1024
+
+/**
+ * b2r2_op_type - the type of B2R2 operation to configure
+ */
+enum b2r2_op_type {
+ B2R2_DIRECT_COPY,
+ B2R2_DIRECT_FILL,
+ B2R2_COPY,
+ B2R2_FILL,
+ B2R2_SCALE,
+ B2R2_ROTATE,
+ B2R2_SCALE_AND_ROTATE,
+ B2R2_FLIP,
+};
+
+/**
+ * b2r2_fmt_type - the type of buffer for a given format
+ */
+enum b2r2_fmt_type {
+ B2R2_FMT_TYPE_RASTER,
+ B2R2_FMT_TYPE_SEMI_PLANAR,
+ B2R2_FMT_TYPE_PLANAR,
+};
+
+/**
+ * b2r2_fmt_conv - the type of format conversion to do
+ */
+enum b2r2_fmt_conv {
+ B2R2_FMT_CONV_NONE,
+ B2R2_FMT_CONV_RGB_TO_YUV,
+ B2R2_FMT_CONV_YUV_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_YUV,
+ B2R2_FMT_CONV_RGB_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_YUV,
+};
+
+/**
+ * enum b2r2_core_queue - Indicates the B2R2 queue that the job belongs to
+ *
+ * @B2R2_CORE_QUEUE_AQ1: Application queue 1
+ * @B2R2_CORE_QUEUE_AQ2: Application queue 2
+ * @B2R2_CORE_QUEUE_AQ3: Application queue 3
+ * @B2R2_CORE_QUEUE_AQ4: Application queue 4
+ * @B2R2_CORE_QUEUE_CQ1: Composition queue 1
+ * @B2R2_CORE_QUEUE_CQ2: Composition queue 2
+ * @B2R2_CORE_QUEUE_NO_OF: Number of queues
+ */
+enum b2r2_core_queue {
+ B2R2_CORE_QUEUE_AQ1 = 0,
+ B2R2_CORE_QUEUE_AQ2,
+ B2R2_CORE_QUEUE_AQ3,
+ B2R2_CORE_QUEUE_AQ4,
+ B2R2_CORE_QUEUE_CQ1,
+ B2R2_CORE_QUEUE_CQ2,
+ B2R2_CORE_QUEUE_NO_OF,
+};
+
+#define B2R2_NUM_APPLICATIONS_QUEUES 4
+
+/**
+ * enum b2r2_core_job_state - Indicates the current state of the job
+ *
+ * @B2R2_CORE_JOB_IDLE: Never queued
+ * @B2R2_CORE_JOB_QUEUED: In queue but not started yet
+ * @B2R2_CORE_JOB_RUNNING: Running, executed by B2R2
+ * @B2R2_CORE_JOB_DONE: Completed
+ * @B2R2_CORE_JOB_CANCELED: Canceled
+ */
+enum b2r2_core_job_state {
+ B2R2_CORE_JOB_IDLE = 0,
+ B2R2_CORE_JOB_QUEUED,
+ B2R2_CORE_JOB_RUNNING,
+ B2R2_CORE_JOB_DONE,
+ B2R2_CORE_JOB_CANCELED,
+};
+
+/**
+ * b2r2_work_buf - specification for a temporary work buffer
+ *
+ * @size - the size of the buffer (set by b2r2_node_split)
+ * @phys_addr - the physical address of the buffer (set by b2r2_blt_main)
+ */
+struct b2r2_work_buf {
+ u32 size;
+ u32 phys_addr;
+ void *virt_addr;
+ u32 mem_handle;
+};
+
+struct tmp_buf {
+ struct b2r2_work_buf buf;
+ bool in_use;
+};
+
+/**
+ * struct b2r2_blt_instance - Represents the B2R2 instance (one per open)
+ *
+ * @lock: Lock to protect the instance
+ *
+ * @report_list: Ready requests that should be reported,
+ * @report_list_waitq: Wait queue for report list
+ * @no_of_active_requests: Number of requests added but not reported
+ * in callback.
+ * @synching: true if any client is waiting for b2r2_blt_synch(0)
+ * @synch_done_waitq: Wait queue to handle synching on request_id 0
+ * @control: The b2r2 control entity
+ */
+struct b2r2_blt_instance {
+ struct mutex lock;
+
+ /* Requests to be reported */
+ struct list_head report_list;
+ wait_queue_head_t report_list_waitq;
+
+ /* Below for synching */
+ u32 no_of_active_requests;
+ bool synching;
+ wait_queue_head_t synch_done_waitq;
+
+ struct b2r2_control *control;
+};
+
+/**
+ * struct b2r2_node - Represents a B2R2 node with register values, executed
+ * by B2R2. Should be allocated non-cached.
+ *
+ * @next: Next node
+ * @physical_address: Physical address to be given to B2R2
+ * (physical address of "node" member below)
+ * @node: The B2R2 node with register settings. This is the data
+ * that B2R2 will use.
+ *
+ */
+struct b2r2_node {
+ struct b2r2_node *next;
+ u32 physical_address;
+
+ int src_tmp_index;
+ int dst_tmp_index;
+
+ int src_index;
+
+ /* B2R2 regs comes here */
+ struct b2r2_link_list node;
+};
+
+/**
+ * struct b2r2_resolved_buf - Contains calculated information about
+ * image buffers.
+ *
+ * @physical_address: Physical address of the buffer
+ * @virtual_address: Virtual address of the buffer
+ * @is_pmem: true if buffer is from pmem
+ * @hwmem_session: Hwmem session
+ * @hwmem_alloc: Hwmem alloc
+ * @filep: File pointer of mapped file (like pmem device, frame buffer device)
+ * @file_physical_start: Physical address of file start
+ * @file_virtual_start: Virtual address of file start
+ * @file_len: File len
+ *
+ */
+struct b2r2_resolved_buf {
+ u32 physical_address;
+ void *virtual_address;
+ bool is_pmem;
+ struct hwmem_alloc *hwmem_alloc;
+ /* Data for validation below */
+ struct file *filep;
+ u32 file_physical_start;
+ u32 file_virtual_start;
+ u32 file_len;
+};
+
+/**
+ * b2r2_node_split_buf - information about a source or destination buffer
+ *
+ * @addr - the physical base address
+ * @chroma_addr - the physical address of the chroma plane
+ * @chroma_cr_addr - the physical address of the Cr chroma plane
+ * @fmt - the buffer format
+ * @type - the buffer format type
+ * @rect - the rectangle of the buffer to use
+ * @color - the color value to use in case of a fill operation
+ * @pitch - the pixmap byte pitch
+ * @height - the pixmap height
+ * @alpha_range - the alpha range of the buffer (0-128 or 0-255)
+ * @hso - the horizontal scan order
+ * @vso - the vertical scan order
+ * @endian - the endianness of the buffer
+ * @plane_selection - the plane to write if buffer is planar or semi-planar
+ */
+struct b2r2_node_split_buf {
+ u32 addr;
+ u32 chroma_addr;
+ u32 chroma_cr_addr;
+
+ enum b2r2_blt_fmt fmt;
+ enum b2r2_fmt_type type;
+
+ struct b2r2_blt_rect rect;
+ struct b2r2_blt_rect win;
+
+ s32 dx;
+ s32 dy;
+
+ u32 color;
+ u16 pitch;
+ u16 width;
+ u16 height;
+
+ enum b2r2_ty alpha_range;
+ enum b2r2_ty hso;
+ enum b2r2_ty vso;
+ enum b2r2_ty endian;
+ enum b2r2_tty dither;
+
+ /* Plane selection (used when writing to a multibuffer format) */
+ enum b2r2_tty plane_selection;
+
+ /* Chroma plane selection (used when writing planar formats) */
+ enum b2r2_tty chroma_selection;
+
+ int tmp_buf_index;
+};
+
+/**
+ * b2r2_node_split_job - an instance of a node split job
+ *
+ * @type - the type of operation
+ * @ivmx - the ivmx matrix to use for color conversion
+ * @blend - determines if blending is enabled
+ * @clip - determines if destination clipping is enabled
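+ * @rotation - determines if rotation is enabled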
+ * @swap_fg_bg - determines if FG and BG should be swapped when blending
+ * @flags - the flags passed in the blt request
+ * @flag_param - parameter required by certain flags,
+ * e.g. color for source color keying.
+ * @transform - the transforms passed in the blt request
+ * @global_alpha - the global alpha
+ * @clip_rect - the clipping rectangle to use
+ * @h_rescale - determines if horizontal rescaling is enabled
+ * @h_rsf - the horizontal scale factor
+ * @v_rescale - determines if vertical rescaling is enabled
+ * @v_rsf - the vertical scale factor
+ * @src - the incoming source buffer
+ * @bg - the incoming background buffer
+ * @dst - the outgoing destination buffer
+ * @work_bufs - work buffer specifications
+ * @tmp_bufs - temporary buffers
+ * @buf_count - the number of temporary buffers used for the job
+ * @node_count - the number of nodes used for the job
+ * @max_buf_size - the maximum size of temporary buffers
+ */
+struct b2r2_node_split_job {
+ enum b2r2_op_type type;
+
+ const u32 *ivmx;
+
+ bool blend;
+ bool clip;
+ bool rotation;
+
+ bool swap_fg_bg;
+
+ u32 flags;
+ u32 flag_param;
+ u32 transform;
+ u32 global_alpha;
+
+ struct b2r2_blt_rect clip_rect;
+
+ bool h_rescale;
+ u16 h_rsf;
+
+ bool v_rescale;
+ u16 v_rsf;
+
+ struct b2r2_node_split_buf src;
+ struct b2r2_node_split_buf bg;
+ struct b2r2_node_split_buf dst;
+
+ struct b2r2_work_buf work_bufs[MAX_TMP_BUFS_NEEDED];
+ struct b2r2_node_split_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+
+ u32 buf_count;
+ u32 node_count;
+ u32 max_buf_size;
+};
+
+/**
+ * struct b2r2_core_job - Represents a B2R2 core job
+ *
+ * @start_sentinel: Memory overwrite guard
+ *
+ * @tag: Client value. Used by b2r2_core_job_find_first_with_tag().
+ * @prio: Job priority, from -19 up to 20. Mapped to the
+ * B2R2 application queues. Filled in by the client.
+ * @first_node_address: Physical address of the first node. Filled
+ * in by the client.
+ * @last_node_address: Physical address of the last node. Filled
+ * in by the client.
+ *
+ * @callback: Function that will be called when the job is done.
+ * @acquire_resources: Function that allocates the resources needed
+ * to execute the job (i.e. SRAM alloc). Must not
+ * sleep if atomic, should fail with negative error code
+ * if resources not available.
+ * @release_resources: Function that releases the resources previously
+ * allocated by acquire_resources (i.e. SRAM alloc).
+ * @release: Function that will be called when the reference count reaches
+ * zero.
+ *
+ * @job_id: Unique id for this job, assigned by B2R2 core
+ * @job_state: The current state of the job
+ * @jiffies: Number of jiffies needed for this request
+ *
+ * @list: List entry element for internal list management
+ * @event: Wait queue event to wait for job done
+ * @work: Work queue structure, for callback implementation
+ *
+ * @queue: The queue that this job shall be submitted to
+ * @control: B2R2 Queue control
+ * @pace_control: For composition queue only
+ * @interrupt_context: Context for interrupt
+ * @hw_start_time: The point when the b2r2 HW queue is activated for this job
+ * @nsec_active_in_hw: Time spent on the b2r2 HW queue for this job
+ *
+ * @end_sentinel: Memory overwrite guard
+ */
+struct b2r2_core_job {
+ u32 start_sentinel;
+
+ /* Data to be filled in by client */
+ int tag;
+ int prio;
+ u32 first_node_address;
+ u32 last_node_address;
+ void (*callback)(struct b2r2_core_job *);
+ int (*acquire_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release)(struct b2r2_core_job *);
+
+ /* Output data, do not modify */
+ int job_id;
+ enum b2r2_core_job_state job_state;
+ unsigned long jiffies;
+
+ /* Data below is internal to b2r2_core, do not modify */
+
+ /* Reference counting */
+ u32 ref_count;
+
+ /* Internal data */
+ struct list_head list;
+ wait_queue_head_t event;
+ struct work_struct work;
+
+ /* B2R2 HW data */
+ enum b2r2_core_queue queue;
+ u32 control;
+ u32 pace_control;
+ u32 interrupt_context;
+
+ /* Timing data */
+ u32 hw_start_time;
+ s32 nsec_active_in_hw;
+
+ u32 end_sentinel;
+};
+
+/**
+ * struct b2r2_blt_request - Represents one B2R2 blit request
+ *
+ * @instance: Back pointer to the instance structure
+ * @list: List item to keep track of requests per instance
+ * @user_req: The request received from userspace
+ * @job: The administration structure for the B2R2 job,
+ * consisting of one or more nodes
+ * @node_split_job: The administration structure for the B2R2 node split job
+ * @first_node: Pointer to the first B2R2 node
+ * @request_id: Request id for this job
+ * @src_resolved: Calculated info about the source buffer
+ * @src_mask_resolved: Calculated info about the source mask buffer
+ * @bg_resolved: Calculated info about the background buffer
+ * @dst_resolved: Calculated info about the destination buffer
+ * @profile: True if the blit shall be profiled, false otherwise
+ */
+struct b2r2_blt_request {
+ struct b2r2_blt_instance *instance;
+ struct list_head list;
+ struct b2r2_blt_req user_req;
+ struct b2r2_core_job job;
+ struct b2r2_node_split_job node_split_job;
+ struct b2r2_node *first_node;
+ int request_id;
+
+ /* Resolved buffer addresses */
+ struct b2r2_resolved_buf src_resolved;
+ struct b2r2_resolved_buf src_mask_resolved;
+ struct b2r2_resolved_buf bg_resolved;
+ struct b2r2_resolved_buf dst_resolved;
+
+ /* TBD: Info about SRAM usage & needs */
+ struct b2r2_work_buf *bufs;
+ u32 buf_count;
+
+ /* color look-up table */
+ void *clut;
+ u32 clut_phys_addr;
+
+ /* Profiling stuff */
+ bool profile;
+
+ s32 nsec_active_in_cpu;
+
+ u32 start_time_nsec;
+ s32 total_time_nsec;
+};
+
+/**
+ * struct b2r2_mem_heap - The memory heap
+ *
+ * @start_phys_addr: Physical memory start address
+ * @start_virt_ptr: Virtual pointer to start
+ * @size: Memory size
+ * @align: Alignment
+ * @blocks: List of all blocks
+ * @heap_lock: Protection for the heap
+ * @node_size: Size of each B2R2 node
+ * @node_heap: Heap for B2R2 node allocations
+ * @debugfs_root_dir: Debugfs B2R2 mem root dir
+ * @debugfs_heap_stats: Debugfs B2R2 memory status
+ * @debugfs_dir_blocks: Debugfs B2R2 free blocks dir
+ */
+struct b2r2_mem_heap {
+ dma_addr_t start_phys_addr;
+ void *start_virt_ptr;
+ u32 size;
+ u32 align;
+ struct list_head blocks;
+ spinlock_t heap_lock;
+ u32 node_size;
+ struct dma_pool *node_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_heap_stats;
+ struct dentry *debugfs_dir_blocks;
+#endif
+};
+
+/**
+ *
+ * struct b2r2_control - The b2r2 control entity, one per B2R2 device
+ *
+ * @dev: The device handle of the b2r2 instance
+ * @id: The id of the b2r2 instance
+ * @name: The name of the b2r2 instance
+ * @data: Used to store a reference to b2r2_core
+ * @tmp_bufs: Temporary buffers needed in the node splitter
+ * @filters_initialized: Indicates whether filters have been
+ * initialized for this b2r2 instance
+ * @mem_heap: The b2r2 heap, e.g. used to allocate nodes
+ * @debugfs_latest_request: Copy of the latest request issued
+ * @debugfs_root_dir: The debugfs root directory, e.g. /debugfs/b2r2
+ * @debugfs_debug_root_dir: The b2r2 debug root directory,
+ * e.g. /debugfs/b2r2/debug
+ * @stat_lock: Spin lock protecting the statistics
+ * @stat_n_jobs_added: Number of jobs added to b2r2_core
+ * @stat_n_jobs_released: Number of jobs released (job_release called)
+ * @stat_n_jobs_in_report_list: Number of jobs currently in the report list
+ * @stat_n_in_blt: Number of client threads currently exec inside b2r2_blt()
+ * @stat_n_in_blt_synch: Number of client threads currently waiting for synch
+ * @stat_n_in_blt_add: Number of client threads currently adding in b2r2_blt
+ * @stat_n_in_blt_wait: Number of client threads currently waiting in b2r2_blt
+ * @stat_n_in_synch_0: Number of client threads currently in b2r2_blt_sync
+ * waiting for all client jobs to finish
+ * @stat_n_in_synch_job: Number of client threads currently in b2r2_blt_sync
+ * waiting for a specific job to finish
+ * @stat_n_in_query_cap: Number of clients currently in query cap
+ * @stat_n_in_open: Number of clients currently in b2r2_blt_open
+ * @stat_n_in_release: Number of clients currently in b2r2_blt_release
+ * @last_job_lock: Mutex protecting last_job
+ * @last_job: The last running job on this b2r2 instance
+ * @last_job_chars: Temporary buffer used in printing last_job
+ * @prev_node_count: Node count of last_job
+ */
+struct b2r2_control {
+ struct miscdevice miscdev;
+ struct device *dev;
+ int id;
+ char name[16];
+ void *data;
+ struct tmp_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+ int filters_initialized;
+ struct b2r2_mem_heap mem_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct b2r2_blt_request debugfs_latest_request;
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_debug_root_dir;
+#endif
+ struct mutex stat_lock;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_released;
+ unsigned long stat_n_jobs_in_report_list;
+ unsigned long stat_n_in_blt;
+ unsigned long stat_n_in_blt_synch;
+ unsigned long stat_n_in_blt_add;
+ unsigned long stat_n_in_blt_wait;
+ unsigned long stat_n_in_synch_0;
+ unsigned long stat_n_in_synch_job;
+ unsigned long stat_n_in_query_cap;
+ unsigned long stat_n_in_open;
+ unsigned long stat_n_in_release;
+ struct mutex last_job_lock;
+ struct b2r2_node *last_job;
+ char *last_job_chars;
+ int prev_node_count;
+};
+
+/* FIXME: The functions below should be removed when we switch to
+   the new Robert Lind allocator */
+
+/**
+ * b2r2_blt_alloc_nodes() - Allocate nodes
+ *
+ * @node_count: Number of nodes to allocate
+ *
+ * Return:
+ * Returns a pointer to the first node in the node list.
+ */
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int node_count);
+
+/**
+ * b2r2_blt_free_nodes() - Release nodes previously allocated via
+ * b2r2_generate_nodes
+ *
+ * @first_node: First node in linked list of nodes
+ */
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node);
+
+/**
+ * b2r2_blt_module_init() - Initialize the B2R2 blt module
+ */
+int b2r2_blt_module_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_blt_module_exit() - Un-initialize the B2R2 blt module
+ */
+void b2r2_blt_module_exit(struct b2r2_control *cont);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_kernel_if.c b/drivers/video/b2r2/b2r2_kernel_if.c
new file mode 100644
index 00000000000..373311ccca5
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_kernel_if.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 kernel interface for being a separate module
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#ifdef CONFIG_ANDROID_PMEM
+#include <linux/android_pmem.h>
+#endif
+#include <linux/fb.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(fput_light);
+EXPORT_SYMBOL(flush_cache_range);
+EXPORT_SYMBOL(task_sched_runtime);
+#ifdef CONFIG_ANDROID_PMEM
+EXPORT_SYMBOL(get_pmem_file);
+EXPORT_SYMBOL(put_pmem_file);
+EXPORT_SYMBOL(flush_pmem_file);
+#endif
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.c b/drivers/video/b2r2/b2r2_mem_alloc.c
new file mode 100644
index 00000000000..e5235d2c97f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal Memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "b2r2_internal.h"
+#include "b2r2_mem_alloc.h"
+
+/* Forward declarations */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ struct b2r2_control *cont, u32 offset, u32 size, bool free);
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block);
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mem_heap,
+ struct b2r2_mem_heap_status *mem_heap_status);
+
+/* Align value down to specified alignment */
+static inline u32 align_down(u32 align, u32 value)
+{
+ return value & ~(align - 1);
+}
+
+/* Align value up to specified alignment */
+static inline u32 align_up(u32 align, u32 value)
+{
+ return (value + align - 1) & ~(align - 1);
+}
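+
+/*
+ * Both helpers assume that "align" is a non-zero power of two, e.g.
+ * align_down(16, 100) == 96 and align_up(16, 100) == 112.
+ */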
+
+
+#ifdef CONFIG_DEBUG_FS
+/* About debugfs:
+ * debugfs is a mountable debug file system.
+ *
+ * Mount like this:
+ * mkdir /debug
+ * mount -t debugfs none /debug
+ * ls /debug/b2r2/mem
+ *
+ * ls -al /debug/b2r2/mem/blocks
+ * cat /debug/b2r2/mem/stats
+ */
+
+
+/* Create string containing memory heap status */
+static char *get_b2r2_mem_stats(struct b2r2_mem_heap *mem_heap, char *buf)
+{
+ struct b2r2_mem_heap_status mem_heap_status;
+
+ if (b2r2_mem_heap_status(mem_heap, &mem_heap_status) != 0) {
+ strcpy(buf, "Error, failed to get status\n");
+ return buf;
+ }
+
+ sprintf(buf,
+ "Handle : 0x%lX\n"
+ "Physical start address : 0x%lX\n"
+ "Size : %lu\n"
+ "Align : %lu\n"
+ "No of blocks allocated : %lu\n"
+ "Allocated size : %lu\n"
+ "No of free blocks : %lu\n"
+ "Free size : %lu\n"
+ "No of locks : %lu\n"
+ "No of locked : %lu\n"
+ "No of nodes : %lu\n",
+ (unsigned long) mem_heap,
+ (unsigned long) mem_heap_status.start_phys_addr,
+ (unsigned long) mem_heap_status.size,
+ (unsigned long) mem_heap_status.align,
+ (unsigned long) mem_heap_status.num_alloc,
+ (unsigned long) mem_heap_status.allocated_size,
+ (unsigned long) mem_heap_status.num_free,
+ (unsigned long) mem_heap_status.free_size,
+ (unsigned long) mem_heap_status.num_locks,
+ (unsigned long) mem_heap_status.num_locked,
+ (unsigned long) mem_heap_status.num_nodes);
+
+ return buf;
+}
+
+/*
+ * Print memory heap status on file
+ * (Use like "cat /debug/b2r2/mem/stats")
+ */
+static int debugfs_b2r2_mem_stats_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_heap *mem_heap = filp->f_dentry->d_inode->i_private;
+ char Buf[400];
+ size_t dev_size;
+ int ret = 0;
+
+ get_b2r2_mem_stats(mem_heap, Buf);
+ dev_size = strlen(Buf);
+
+	/* Nothing more to read if the position is past the end */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for the "stats" file */
+static const struct file_operations debugfs_b2r2_mem_stats_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_stats_read,
+};
+
+/* read function for file in the "blocks" sub directory */
+static int debugfs_b2r2_mem_block_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct b2r2_mem_block *mem_block = filp->f_dentry->d_inode->i_private;
+ char Buf[200];
+ size_t dev_size;
+ int ret = 0;
+
+ dev_size = sprintf(Buf, "offset: %08lX %s size: %8d "
+ "lock_count: %2d\n",
+ (unsigned long) mem_block->offset,
+ mem_block->free ? "free" : "allc",
+ mem_block->size,
+ mem_block->lock_count);
+
+	/* Nothing more to read if the position is past the end */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (*f_pos + count > dev_size)
+ count = dev_size - *f_pos;
+
+	if (copy_to_user(buf, Buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+ return ret;
+}
+
+/* debugfs file operations for files in the "blocks" directory */
+static const struct file_operations debugfs_b2r2_mem_block_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_mem_block_read,
+};
+
+/*
+ * Create or update the debugfs directory entry for a file in the
+ * "blocks" directory (a memory allocation)
+ */
+void debugfs_create_mem_block_entry(struct b2r2_mem_block *mem_block,
+ struct dentry *parent)
+{
+ struct timespec tm = current_kernel_time();
+ struct timespec atime = tm;
+ struct timespec mtime = tm;
+ struct timespec ctime = tm;
+
+ if (mem_block->debugfs_block) {
+ atime = mem_block->debugfs_block->d_inode->i_atime;
+ ctime = mem_block->debugfs_block->d_inode->i_ctime;
+ debugfs_remove(mem_block->debugfs_block);
+ }
+
+ /* Add the block in debugfs */
+ if (mem_block->free)
+ sprintf(mem_block->debugfs_fname, "%08lX free",
+ (unsigned long) mem_block->offset);
+ else {
+ sprintf(mem_block->debugfs_fname, "%08lX allc h:%08lX "
+ "lck:%d ",
+ (unsigned long) mem_block->offset,
+ (unsigned long) mem_block,
+ mem_block->lock_count);
+ }
+
+ mem_block->debugfs_block = debugfs_create_file(
+ mem_block->debugfs_fname,
+ 0444, parent, mem_block,
+ &debugfs_b2r2_mem_block_fops);
+ if (mem_block->debugfs_block) {
+ mem_block->debugfs_block->d_inode->i_size = mem_block->size;
+ mem_block->debugfs_block->d_inode->i_atime = atime;
+ mem_block->debugfs_block->d_inode->i_mtime = mtime;
+ mem_block->debugfs_block->d_inode->i_ctime = ctime;
+ }
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* Module initialization function */
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size)
+{
+ struct b2r2_mem_block *mem_block;
+ u32 aligned_size;
+
+ dev_info(cont->dev, "%s: Creating heap for size %d bytes\n",
+ __func__, (int) heap_size);
+
+ /* Align size */
+ aligned_size = align_down(align, heap_size);
+ if (aligned_size == 0)
+ return -EINVAL;
+
+ cont->mem_heap.start_virt_ptr = dma_alloc_coherent(cont->dev,
+ aligned_size, &(cont->mem_heap.start_phys_addr), GFP_KERNEL);
+ if (!cont->mem_heap.start_phys_addr || !cont->mem_heap.start_virt_ptr) {
+		dev_err(cont->dev, "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize the heap */
+ cont->mem_heap.size = aligned_size;
+ cont->mem_heap.align = align;
+
+ INIT_LIST_HEAD(&cont->mem_heap.blocks);
+
+#ifdef CONFIG_DEBUG_FS
+ /* Register debugfs */
+ if (cont->mem_heap.debugfs_root_dir) {
+ cont->mem_heap.debugfs_heap_stats = debugfs_create_file(
+ "stats", 0444, cont->mem_heap.debugfs_root_dir,
+ &cont->mem_heap, &debugfs_b2r2_mem_stats_fops);
+ cont->mem_heap.debugfs_dir_blocks = debugfs_create_dir(
+ "blocks", cont->mem_heap.debugfs_root_dir);
+ }
+#endif
+
+ /* Create the first _free_ memory block */
+ mem_block = b2r2_mem_block_alloc(cont, 0, aligned_size, true);
+ if (!mem_block) {
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+		dev_err(cont->dev, "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Add the free block to the blocks list */
+ list_add(&mem_block->list, &cont->mem_heap.blocks);
+
+ /* Allocate separate heap for B2R2 nodes */
+ cont->mem_heap.node_size = node_size;
+ cont->mem_heap.node_heap = dma_pool_create("b2r2_node_cache",
+ cont->dev, node_size, align, 4096);
+ if (!cont->mem_heap.node_heap) {
+ b2r2_mem_block_free(mem_block);
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+		dev_err(cont->dev, "B2R2_MEM: Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_init);
+
+/* Module exit function */
+void b2r2_mem_exit(struct b2r2_control *cont)
+{
+	struct list_head *ptr, *next;
+
+ /* Free B2R2 node heap */
+ dma_pool_destroy(cont->mem_heap.node_heap);
+
+	list_for_each_safe(ptr, next, &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ b2r2_mem_block_free(mem_block);
+ }
+
+ dma_free_coherent(cont->dev, cont->mem_heap.size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
+}
+EXPORT_SYMBOL(b2r2_mem_exit);
+
+/* Return status of the heap */
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mheap,
+ struct b2r2_mem_heap_status *mem_heap_status)
+{
+ struct list_head *ptr;
+
+ if (!mheap || !mem_heap_status)
+ return -EINVAL;
+ memset(mem_heap_status, 0, sizeof(*mem_heap_status));
+
+ /* Lock the heap */
+ spin_lock(&mheap->heap_lock);
+
+ /* Fill in static info */
+ mem_heap_status->start_phys_addr = mheap->start_phys_addr;
+ mem_heap_status->size = mheap->size;
+ mem_heap_status->align = mheap->align;
+
+ list_for_each(ptr, &mheap->blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free) {
+ mem_heap_status->num_free++;
+ mem_heap_status->free_size += mem_block->size;
+ } else {
+ if (mem_block->lock_count) {
+ mem_heap_status->num_locked++;
+ mem_heap_status->num_locks +=
+ mem_block->lock_count;
+ }
+ mem_heap_status->num_alloc++;
+ mem_heap_status->allocated_size += mem_block->size;
+ }
+ }
+
+ spin_unlock(&mheap->heap_lock);
+
+ return 0;
+}
+
+/* Internal: Allocate a housekeeping structure
+ * for an allocated or free memory block
+ */
+static struct b2r2_mem_block *b2r2_mem_block_alloc(
+ struct b2r2_control *cont, u32 offset, u32 size, bool free)
+{
+	/* GFP_ATOMIC since callers may hold the heap spinlock */
+	struct b2r2_mem_block *mem_block = kmalloc(
+		sizeof(struct b2r2_mem_block), GFP_ATOMIC);
+
+ if (mem_block) {
+ mem_block->offset = offset;
+ mem_block->size = size;
+ mem_block->free = free;
+ mem_block->lock_count = 0;
+
+ INIT_LIST_HEAD(&mem_block->list);
+
+#ifdef CONFIG_DEBUG_FS
+ mem_block->debugfs_block = NULL;
+ /* Add the block in debugfs */
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ return mem_block;
+}
+
+/* Internal: Unlink and release housekeeping structure */
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block)
+{
+	if (mem_block) {
+		/* Also safe for blocks that were never added to the
+		 * heap list, since INIT_LIST_HEAD leaves them
+		 * self-linked */
+		list_del(&mem_block->list);
+#ifdef CONFIG_DEBUG_FS
+		debugfs_remove(mem_block->debugfs_block);
+#endif
+		kfree(mem_block);
+	}
+}
+
+/* Allocate a block from the heap */
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle)
+{
+ int ret = 0;
+ struct list_head *ptr;
+ struct b2r2_mem_block *found_mem_block = NULL;
+ u32 aligned_size;
+
+ if (!mem_handle)
+ return -EINVAL;
+
+	dev_info(cont->dev, "%s: size=%u\n", __func__, requested_size);
+
+ *mem_handle = 0;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ aligned_size = align_up(cont->mem_heap.align, requested_size);
+ /* Try to find the best matching free block of suitable size */
+ list_for_each(ptr, &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *mem_block =
+ list_entry(ptr, struct b2r2_mem_block, list);
+
+ if (mem_block->free && mem_block->size >= aligned_size &&
+ (!found_mem_block ||
+ mem_block->size < found_mem_block->size)) {
+ found_mem_block = mem_block;
+ if (found_mem_block->size == aligned_size)
+ break;
+ }
+ }
+
+ if (found_mem_block) {
+ struct b2r2_mem_block *new_block
+ = b2r2_mem_block_alloc(cont,
+ found_mem_block->offset,
+ requested_size, false);
+
+ if (new_block) {
+ /* Insert the new block before the found block */
+ list_add_tail(&new_block->list,
+ &found_mem_block->list);
+
+ /* Split the free block */
+ found_mem_block->offset += aligned_size;
+ found_mem_block->size -= aligned_size;
+
+ if (found_mem_block->size == 0)
+ b2r2_mem_block_free(found_mem_block);
+ else {
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(
+ found_mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ *mem_handle = (u32) new_block;
+ *returned_size = aligned_size;
+ } else {
+ ret = -ENOMEM;
+ }
+ } else
+ ret = -ENOMEM;
+
+ if (ret != 0) {
+ *returned_size = 0;
+ *mem_handle = (u32) 0;
+ }
+
+ /* Unlock */
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_alloc);
+
+/* Free the allocated block */
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle)
+{
+ int ret = 0;
+ struct b2r2_mem_block *mem_block = (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ if (!ret && mem_block->free)
+ ret = -EINVAL;
+
+ if (!ret) {
+		dev_info(cont->dev, "%s: freeing block 0x%p\n",
+			__func__, mem_block);
+ /* Release the block */
+
+ mem_block->free = true;
+ mem_block->size = align_up(cont->mem_heap.align,
+ mem_block->size);
+
+ /* Join with previous block if possible */
+ if (mem_block->list.prev != &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *prev_block =
+ list_entry(mem_block->list.prev,
+ struct b2r2_mem_block, list);
+
+ if (prev_block->free &&
+ (prev_block->offset + prev_block->size) ==
+ mem_block->offset) {
+ mem_block->offset = prev_block->offset;
+ mem_block->size += prev_block->size;
+
+ b2r2_mem_block_free(prev_block);
+ }
+ }
+
+ /* Join with next block if possible */
+ if (mem_block->list.next != &cont->mem_heap.blocks) {
+ struct b2r2_mem_block *next_block
+ = list_entry(mem_block->list.next,
+ struct b2r2_mem_block,
+ list);
+
+ if (next_block->free &&
+ (mem_block->offset + mem_block->size) ==
+ next_block->offset) {
+ mem_block->size += next_block->size;
+
+ b2r2_mem_block_free(next_block);
+ }
+ }
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+ }
+
+ /* Unlock */
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_mem_free);
+
+/* Lock the allocated block in memory */
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ mem_block->lock_count++;
+
+ if (phys_addr)
+ *phys_addr = cont->mem_heap.start_phys_addr + mem_block->offset;
+ if (virt_ptr)
+ *virt_ptr = (char *) cont->mem_heap.start_virt_ptr +
+ mem_block->offset;
+ if (size)
+ *size = align_up(cont->mem_heap.align, mem_block->size);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_mem_block_entry(mem_block,
+ cont->mem_heap.debugfs_dir_blocks);
+#endif
+
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(b2r2_mem_lock);
+
+/* Unlock the allocated block in memory */
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle)
+{
+ struct b2r2_mem_block *mem_block =
+ (struct b2r2_mem_block *) mem_handle;
+
+ if (!mem_block)
+ return -EINVAL;
+
+ /* Lock the heap */
+ spin_lock(&cont->mem_heap.heap_lock);
+
+ mem_block->lock_count--;
+
+ spin_unlock(&cont->mem_heap.heap_lock);
+
+ /* debugfs will be updated in release */
+ return 0;
+/* return b2r2_mem_free(mem_handle);*/
+}
+EXPORT_SYMBOL(b2r2_mem_unlock);
+
+/* Allocate one or more b2r2 nodes from DMA pool */
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node)
+{
+ int i;
+ int ret = 0;
+ u32 physical_address;
+ struct b2r2_node *first_node_ptr;
+ struct b2r2_node *node_ptr;
+
+ /* Check input parameters */
+	if (num_nodes == 0 || !first_node) {
+		dev_err(cont->dev,
+			"B2R2_MEM: Invalid parameter for b2r2_node_alloc, "
+			"num_nodes=%u, first_node=0x%p\n",
+			num_nodes, first_node);
+ return -EINVAL;
+ }
+
+ /* Allocate the first node */
+ first_node_ptr = dma_pool_alloc(cont->mem_heap.node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (!first_node_ptr) {
+ dev_err(cont->dev,
+ "B2R2_MEM: Failed to allocate memory for node\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize first node */
+ first_node_ptr->next = NULL;
+ first_node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+
+ /* Allocate and initialize remaining nodes, */
+ /* and link them into a list */
+ for (i = 1, node_ptr = first_node_ptr; i < num_nodes; i++) {
+ node_ptr->next = dma_pool_alloc(cont->mem_heap.node_heap,
+ GFP_DMA | GFP_KERNEL, &physical_address);
+ if (node_ptr->next) {
+ node_ptr = node_ptr->next;
+ node_ptr->next = NULL;
+ node_ptr->physical_address = physical_address +
+ offsetof(struct b2r2_node, node);
+ } else {
+			dev_err(cont->dev,
+				"B2R2_MEM: Failed to allocate memory for node\n");
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ /* If all nodes were allocated successfully, */
+ /* return the first node */
+ if (!ret)
+ *first_node = first_node_ptr;
+ else
+ b2r2_node_free(cont, first_node_ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL(b2r2_node_alloc);
+
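+/*
+ * Example usage (sketch, error handling omitted): allocate a chain of
+ * three nodes and release it again.
+ *
+ *	struct b2r2_node *first;
+ *
+ *	if (b2r2_node_alloc(cont, 3, &first) == 0)
+ *		b2r2_node_free(cont, first);
+ */
+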
+/* Free a linked list of b2r2 nodes */
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node)
+{
+ struct b2r2_node *current_node = first_node;
+ struct b2r2_node *next_node = NULL;
+
+ /* Traverse the linked list and free the nodes */
+ while (current_node != NULL) {
+ next_node = current_node->next;
+ dma_pool_free(cont->mem_heap.node_heap, current_node,
+ current_node->physical_address -
+ offsetof(struct b2r2_node, node));
+ current_node = next_node;
+ }
+}
+EXPORT_SYMBOL(b2r2_node_free);
+
+MODULE_AUTHOR("Robert Lind <robert.lind@ericsson.com");
+MODULE_DESCRIPTION("Ericsson AB B2R2 physical memory driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.h b/drivers/video/b2r2/b2r2_mem_alloc.h
new file mode 100644
index 00000000000..4fd1e66abca
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_mem_alloc.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 internal memory allocator
+ *
+ * Author: Robert Lind <robert.lind@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_MEM_ALLOC_H
+#define __B2R2_MEM_ALLOC_H
+
+#include "b2r2_internal.h"
+
+
+/**
+ * struct b2r2_mem_heap_status - Information about current state of the heap
+ *
+ * @start_phys_addr: Physical address of the memory area
+ * @size: Size of the memory area
+ * @align: Alignment of start and allocation sizes (in bytes).
+ * @num_alloc: Number of memory allocations
+ * @allocated_size: Size allocated (sum of requested sizes)
+ * @num_free: Number of free blocks (fragments)
+ * @free_size: Free size available for allocation
+ * @num_locks: Sum of the number of locks on memory allocations
+ * @num_locked: Number of locked memory allocations
+ * @num_nodes: Number of node allocations
+ *
+ **/
+struct b2r2_mem_heap_status {
+ u32 start_phys_addr;
+ u32 size;
+ u32 align;
+ u32 num_alloc;
+ u32 allocated_size;
+ u32 num_free;
+ u32 free_size;
+ u32 num_locks;
+ u32 num_locked;
+ u32 num_nodes;
+};
+
+/**
+ * struct b2r2_mem_block - Represents one block of b2r2
+ * physical memory, free or allocated
+ *
+ * @list: For membership in list
+ * @offset: Offset in b2r2 physical memory area (aligned)
+ * @size: Size of the object (requested size if busy, else actual)
+ * @free: True if the block is free
+ * @lock_count: Lock count
+ * @debugfs_fname: Debugfs file name
+ * @debugfs_block: Debugfs dir entry for the block
+ */
+struct b2r2_mem_block {
+ struct list_head list;
+ u32 offset;
+ u32 size;
+ bool free;
+ u32 lock_count;
+#ifdef CONFIG_DEBUG_FS
+ char debugfs_fname[80];
+ struct dentry *debugfs_block;
+#endif
+};
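+
+/*
+ * Note: the u32 mem_handle values used by the API below are simply the
+ * kernel address of the corresponding struct b2r2_mem_block cast to u32,
+ * as can be seen in b2r2_mem_lock()/b2r2_mem_unlock() in b2r2_mem_alloc.c.
+ */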
+
+
+/* B2R2 memory API (kernel) */
+
+/**
+ * b2r2_mem_init() - Initializes the B2R2 memory manager
+ * @cont: Pointer to the b2r2 control structure; its device is used
+ *        for allocating the memory heap
+ * @heap_size: Size of the heap (in bytes)
+ * @align: Alignment to use for memory allocations on heap (in bytes)
+ * @node_size: Size of each B2R2 node (in bytes)
+ *
+ * Returns 0 if success, else negative error code
+ **/
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size);
+
+/**
+ * b2r2_mem_exit() - Cleans up the B2R2 memory manager
+ * @cont: Pointer to the b2r2 control structure
+ *
+ **/
+void b2r2_mem_exit(struct b2r2_control *cont);
+
+/**
+ * b2r2_mem_alloc() - Allocates memory block from physical memory heap
+ * @cont: Pointer to the b2r2 control structure
+ * @requested_size: Requested size
+ * @returned_size: Actual size of memory block. Might be adjusted due to
+ * alignment but is always >= requested size if function
+ * succeeds
+ * @mem_handle: Returned memory handle
+ *
+ * All memory allocations are movable when not locked.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle);
+
+/**
+ * b2r2_mem_free() - Frees an allocation
+ * @cont: Pointer to the b2r2 control structure
+ * @mem_handle: Memory handle
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle);
+
+/**
+ * b2r2_mem_lock() - Lock an allocation in memory and return its address
+ * @cont: Pointer to the b2r2 control structure
+ * @mem_handle: Memory handle
+ * @phys_addr: Returned physical address to start of memory allocation.
+ * May be NULL.
+ * @virt_ptr: Returned virtual address pointer to start of memory allocation.
+ * May be NULL.
+ * @size: Returned size of memory allocation. May be NULL.
+ *
+ * The memory allocation is locked in place and its physical address
+ * is returned.
+ * The lock count is incremented by one.
+ * You need to call b2r2_mem_unlock once for each call to
+ * b2r2_mem_lock.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size);
+
+/**
+ * b2r2_mem_unlock() - Unlock previously locked memory
+ * @cont: Pointer to the b2r2 control structure
+ * @mem_handle: Memory handle
+ *
+ * Decrements lock count. When lock count reaches 0 the
+ * memory area is movable again.
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle);
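+
+/*
+ * A minimal usage sketch of the heap API above (illustrative only, not
+ * part of the driver; error handling is shortened and "cont" is assumed
+ * to be a valid, initialized struct b2r2_control pointer):
+ *
+ *	u32 handle, size, phys;
+ *	void *virt;
+ *
+ *	if (b2r2_mem_alloc(cont, 4096, &size, &handle) < 0)
+ *		return -ENOMEM;
+ *	if (b2r2_mem_lock(cont, handle, &phys, &virt, &size) == 0) {
+ *		... program B2R2 with 'phys', fill the area via 'virt' ...
+ *		b2r2_mem_unlock(cont, handle);
+ *	}
+ *	b2r2_mem_free(cont, handle);
+ */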
+
+/**
+ * b2r2_node_alloc() - Allocates one or more B2R2 nodes from the node pool
+ * @cont: Pointer to the b2r2 control structure
+ * @num_nodes: Number of linked nodes to allocate
+ * @first_node: Returned pointer to first node in linked list
+ *
+ * Returns 0 if OK else negative error value
+ **/
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node);
+
+/**
+ * b2r2_node_free() - Frees a linked list of allocated B2R2 nodes
+ * @cont: Pointer to the b2r2 control structure
+ * @first_node: Pointer to first node in linked list
+ **/
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node);
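+
+/*
+ * A minimal sketch of node list handling (illustrative only; "cont" is
+ * assumed valid). b2r2_node_alloc() returns a NULL-terminated list that
+ * is released as a whole by b2r2_node_free():
+ *
+ *	struct b2r2_node *first;
+ *
+ *	if (b2r2_node_alloc(cont, 4, &first) == 0) {
+ *		... fill in first, first->next, ... (four nodes) ...
+ *		b2r2_node_free(cont, first);
+ *	}
+ */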
+
+
+#endif /* __B2R2_MEM_ALLOC_H */
diff --git a/drivers/video/b2r2/b2r2_node_gen.c b/drivers/video/b2r2/b2r2_node_gen.c
new file mode 100644
index 00000000000..1f48bac6fe7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_gen.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node generator
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include "b2r2_internal.h"
+
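+/*
+ * free_nodes() below relies on b2r2_blt_alloc_nodes() having allocated
+ * all nodes as one contiguous dma_alloc_coherent() block: the list is
+ * walked only to recover the element count before the whole block is
+ * released with a single dma_free_coherent() call.
+ */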
+static void free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ struct b2r2_node *node = first_node;
+ int no_of_nodes = 0;
+
+ while (node) {
+ no_of_nodes++;
+ node = node->next;
+ }
+
+ dma_free_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ first_node,
+ first_node->physical_address -
+ offsetof(struct b2r2_node, node));
+}
+
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int no_of_nodes)
+{
+ u32 physical_address;
+ struct b2r2_node *nodes;
+ struct b2r2_node *tmpnode;
+
+ if (no_of_nodes <= 0) {
+ dev_err(cont->dev, "%s: Wrong number of nodes (%d)",
+ __func__, no_of_nodes);
+ return NULL;
+ }
+
+ /* Allocate the memory */
+ nodes = (struct b2r2_node *) dma_alloc_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ &physical_address, GFP_DMA | GFP_KERNEL);
+
+ if (nodes == NULL) {
+ dev_err(cont->dev,
+ "%s: Failed to alloc memory for nodes",
+ __func__);
+ return NULL;
+ }
+
+ /* Build the linked list */
+ tmpnode = nodes;
+ physical_address += offsetof(struct b2r2_node, node);
+ while (no_of_nodes--) {
+ tmpnode->physical_address = physical_address;
+ if (no_of_nodes)
+ tmpnode->next = tmpnode + 1;
+ else
+ tmpnode->next = NULL;
+
+ tmpnode++;
+ physical_address += sizeof(struct b2r2_node);
+ }
+
+ return nodes;
+}
+
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
+{
+ free_nodes(cont, first_node);
+}
diff --git a/drivers/video/b2r2/b2r2_node_split.c b/drivers/video/b2r2/b2r2_node_split.c
new file mode 100644
index 00000000000..6587ef0c343
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.c
@@ -0,0 +1,3734 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include "b2r2_node_split.h"
+#include "b2r2_internal.h"
+#include "b2r2_hw.h"
+#include "b2r2_filters.h"
+#include "b2r2_utils.h"
+
+#include <linux/kernel.h>
+
+/*
+ * Macros and constants
+ */
+#define ABS(x) ((x) < 0 ? -(x) : (x))
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+#define INSTANCES_DEFAULT_SIZE 10
+#define INSTANCES_GROW_SIZE 5
+
+/*
+ * Internal types
+ */
+
+
+/*
+ * Global variables
+ */
+
+/**
+ * VMX values for different color space conversions
+ */
+static const u32 vmx_rgb_to_yuv[] = {
+ B2R2_VMX0_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX1_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX2_RGB_TO_YUV_601_VIDEO,
+ B2R2_VMX3_RGB_TO_YUV_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_blt_yuv888[] = {
+ B2R2_VMX0_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX1_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX2_RGB_TO_BLT_YUV888_601_VIDEO,
+ B2R2_VMX3_RGB_TO_BLT_YUV888_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_rgb[] = {
+ B2R2_VMX0_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX1_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX2_YUV_TO_RGB_601_VIDEO,
+ B2R2_VMX3_YUV_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_blt_yuv888_to_rgb[] = {
+ B2R2_VMX0_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX1_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX2_BLT_YUV888_TO_RGB_601_VIDEO,
+ B2R2_VMX3_BLT_YUV888_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_blt_yuv888[] = {
+ B2R2_VMX0_YUV_TO_BLT_YUV888,
+ B2R2_VMX1_YUV_TO_BLT_YUV888,
+ B2R2_VMX2_YUV_TO_BLT_YUV888,
+ B2R2_VMX3_YUV_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yuv[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YUV,
+ B2R2_VMX1_BLT_YUV888_TO_YUV,
+ B2R2_VMX2_BLT_YUV888_TO_YUV,
+ B2R2_VMX3_BLT_YUV888_TO_YUV,
+};
+
+static const u32 vmx_yvu_to_blt_yuv888[] = {
+ B2R2_VMX0_YVU_TO_BLT_YUV888,
+ B2R2_VMX1_YVU_TO_BLT_YUV888,
+ B2R2_VMX2_YVU_TO_BLT_YUV888,
+ B2R2_VMX3_YVU_TO_BLT_YUV888,
+};
+
+static const u32 vmx_blt_yuv888_to_yvu[] = {
+ B2R2_VMX0_BLT_YUV888_TO_YVU,
+ B2R2_VMX1_BLT_YUV888_TO_YVU,
+ B2R2_VMX2_BLT_YUV888_TO_YVU,
+ B2R2_VMX3_BLT_YUV888_TO_YVU,
+};
+
+static const u32 vmx_yvu_to_rgb[] = {
+ B2R2_VMX0_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX1_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX2_YVU_TO_RGB_601_VIDEO,
+ B2R2_VMX3_YVU_TO_RGB_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_yvu[] = {
+ B2R2_VMX0_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX1_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX2_RGB_TO_YVU_601_VIDEO,
+ B2R2_VMX3_RGB_TO_YVU_601_VIDEO,
+};
+
+static const u32 vmx_rgb_to_bgr[] = {
+ B2R2_VMX0_RGB_TO_BGR,
+ B2R2_VMX1_RGB_TO_BGR,
+ B2R2_VMX2_RGB_TO_BGR,
+ B2R2_VMX3_RGB_TO_BGR,
+};
+
+static const u32 vmx_bgr_to_yuv[] = {
+ B2R2_VMX0_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX1_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX2_BGR_TO_YUV_601_VIDEO,
+ B2R2_VMX3_BGR_TO_YUV_601_VIDEO,
+};
+
+static const u32 vmx_yuv_to_bgr[] = {
+ B2R2_VMX0_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX1_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX2_YUV_TO_BGR_601_VIDEO,
+ B2R2_VMX3_YUV_TO_BGR_601_VIDEO,
+};
+
+static const u32 vmx_bgr_to_yvu[] = {
+ B2R2_VMX0_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX1_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX2_BGR_TO_YVU_601_VIDEO,
+ B2R2_VMX3_BGR_TO_YVU_601_VIDEO,
+};
+
+static const u32 vmx_yvu_to_bgr[] = {
+ B2R2_VMX0_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX1_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX2_YVU_TO_BGR_601_VIDEO,
+ B2R2_VMX3_YVU_TO_BGR_601_VIDEO,
+};
+
+static const u32 vmx_yvu_to_yuv[] = {
+ B2R2_VMX0_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX1_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX2_YVU_TO_YUV_601_VIDEO,
+ B2R2_VMX3_YVU_TO_YUV_601_VIDEO,
+};
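+
+/*
+ * Each table above holds the four B2R2_VMX0..B2R2_VMX3 register values
+ * for one color space conversion. analyze_fmt_conv() below selects the
+ * table matching a given src/dst format pair, and set_ivmx() later
+ * applies the four words to a node's input matrix (IVMX) setup.
+ */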
+
+/*
+ * Forward declaration of private functions
+ */
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count);
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count);
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count);
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this);
+
+static void configure_src(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx);
+static void configure_bg(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *bg, bool swap_fg_bg);
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_blend(struct b2r2_control *cont, struct b2r2_node *node,
+ u32 flags, u32 global_alpha);
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect);
+
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+static void configure_direct_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next);
+static int configure_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color, enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_direct_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, struct b2r2_node **next);
+static int configure_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rotate(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_scale(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst, u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this);
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+
+static void recalculate_rects(struct b2r2_control *cont,
+ struct b2r2_blt_req *req);
+
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip);
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr, const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect, bool color_fill, u32 color);
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *this, u32 max_size,
+ enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height);
+
+static enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt);
+static u32 set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color);
+static u8 get_alpha(enum b2r2_blt_fmt fmt, u32 pixel);
+static bool fmt_has_alpha(enum b2r2_blt_fmt fmt);
+
+static bool is_rgb_fmt(enum b2r2_blt_fmt fmt);
+static bool is_bgr_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv420_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv422_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yuv444_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu420_fmt(enum b2r2_blt_fmt fmt);
+static bool is_yvu422_fmt(enum b2r2_blt_fmt fmt);
+
+static int fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width);
+static enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt);
+static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt);
+static enum b2r2_fmt_type get_fmt_type(enum b2r2_blt_fmt fmt);
+
+static bool is_transform(const struct b2r2_blt_request *req);
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf);
+static s32 inv_rescale(s32 dim, u16 sf);
+
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf);
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values);
+
+static void reset_nodes(struct b2r2_node *node);
+
+static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt,
+ enum b2r2_blt_fmt dst_fmt);
+
+/*
+ * Public functions
+ */
+
+/**
+ * b2r2_node_split_analyze() - analyzes the request
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+ u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs,
+ u32 *buf_count, struct b2r2_node_split_job *this)
+{
+ int ret;
+ bool color_fill;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ memset(this, 0, sizeof(*this));
+
+ /* Copy parameters */
+ this->flags = req->user_req.flags;
+ this->transform = req->user_req.transform;
+ this->max_buf_size = max_buf_size;
+ this->global_alpha = req->user_req.global_alpha;
+ this->buf_count = 0;
+ this->node_count = 0;
+
+ if (this->flags & B2R2_BLT_FLAG_BLUR) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Unsupported formats on src */
+ switch (req->user_req.src_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (is_bgr_fmt(req->user_req.dst_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unsupported formats on dst */
+ switch (req->user_req.dst_img.fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ if (is_bgr_fmt(req->user_req.src_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unsupported formats on bg */
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ /*
+ * There are no ivmx on source 1, so check that there is no
+ * such requirement on the background to destination format
+ * conversion. This check is sufficient since the node splitter
+ * currently does not support destination ivmx. That fact also
+ * removes the source format as a parameter when checking the
+ * background format.
+ */
+ if (bg_format_require_ivmx(req->user_req.bg_img.fmt,
+ req->user_req.dst_img.fmt)) {
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if ((this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) &&
+ (is_yuv_fmt(req->user_req.src_img.fmt) ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_1_BIT_A1 ||
+ req->user_req.src_img.fmt == B2R2_BLT_FMT_8_BIT_A8)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source color keying "
+ "with YUV or pure alpha formats.\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if (this->flags & (B2R2_BLT_FLAG_DEST_COLOR_KEY |
+ B2R2_BLT_FLAG_SOURCE_MASK)) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, "
+ "destination color keying.\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ req->user_req.clut == NULL) {
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check for color fill */
+ color_fill = (this->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+
+ /*
+ * B2R2 cannot handle destination clipping on buffers
+ * allocated close to 64MiB bank boundaries.
+	 * Recalculate src_rect and dst_rect to avoid clipping.
+ */
+ recalculate_rects(cont, (struct b2r2_blt_req *) &req->user_req);
+
+ /* Configure the source and destination buffers */
+ set_buf(cont, &this->src, req->src_resolved.physical_address,
+ &req->user_req.src_img, &req->user_req.src_rect,
+ color_fill, req->user_req.src_color);
+
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ set_buf(cont, &this->bg, req->bg_resolved.physical_address,
+ &req->user_req.bg_img, &req->user_req.bg_rect,
+ false, 0);
+ }
+
+ set_buf(cont, &this->dst, req->dst_resolved.physical_address,
+ &req->user_req.dst_img, &req->user_req.dst_rect, false,
+ 0);
+
+ b2r2_log_info(cont->dev, "%s:\n"
+ "\t\tsrc.rect=(%4d, %4d, %4d, %4d)\t"
+ "bg.rect=(%4d, %4d, %4d, %4d)\t"
+ "dst.rect=(%4d, %4d, %4d, %4d)\n", __func__, this->src.rect.x,
+ this->src.rect.y, this->src.rect.width, this->src.rect.height,
+ this->bg.rect.x, this->bg.rect.y, this->bg.rect.width,
+ this->bg.rect.height, this->dst.rect.x, this->dst.rect.y,
+ this->dst.rect.width, this->dst.rect.height);
+
+ if (this->flags & B2R2_BLT_FLAG_DITHER)
+ this->dst.dither = B2R2_TTY_RGB_ROUND_DITHER;
+
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ this->flag_param = req->user_req.src_color;
+
+ /* Check for blending */
+ if ((this->flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) &&
+ (this->global_alpha != 255))
+ this->blend = true;
+ else if (this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ this->blend = (color_fill && fmt_has_alpha(this->dst.fmt)) ||
+ fmt_has_alpha(this->src.fmt);
+ else if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ this->blend = true;
+
+ if (this->blend && this->src.type == B2R2_FMT_TYPE_PLANAR) {
+ b2r2_log_warn(cont->dev, "%s: Unsupported: blend with planar"
+ " source\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check for clipping */
+ this->clip = (this->flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0;
+ if (this->clip) {
+ s32 l = req->user_req.dst_clip_rect.x;
+ s32 r = l + req->user_req.dst_clip_rect.width;
+ s32 t = req->user_req.dst_clip_rect.y;
+ s32 b = t + req->user_req.dst_clip_rect.height;
+
+ /* Intersect the clip and buffer rects */
+ if (l < 0)
+ l = 0;
+ if (r > req->user_req.dst_img.width)
+ r = req->user_req.dst_img.width;
+ if (t < 0)
+ t = 0;
+ if (b > req->user_req.dst_img.height)
+ b = req->user_req.dst_img.height;
+
+ this->clip_rect.x = l;
+ this->clip_rect.y = t;
+ this->clip_rect.width = r - l;
+ this->clip_rect.height = b - t;
+ } else {
+ /* Set the clip rectangle to the buffer bounds */
+ this->clip_rect.x = 0;
+ this->clip_rect.y = 0;
+ this->clip_rect.width = req->user_req.dst_img.width;
+ this->clip_rect.height = req->user_req.dst_img.height;
+ }
+
+ /* Validate the destination */
+ ret = check_rect(cont, &req->user_req.dst_img, &req->user_req.dst_rect,
+ &this->clip_rect);
+ if (ret < 0)
+ goto error;
+
+ /* Validate the source (if not color fill) */
+ if (!color_fill) {
+ ret = check_rect(cont, &req->user_req.src_img,
+ &req->user_req.src_rect, NULL);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Validate the background source */
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ ret = check_rect(cont, &req->user_req.bg_img,
+ &req->user_req.bg_rect, NULL);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Do the analysis depending on the type of operation */
+ if (color_fill) {
+ ret = analyze_color_fill(this, req, &this->node_count);
+ } else {
+
+ bool upsample;
+ bool downsample;
+
+ /*
+		 * YUV formats that are non-raster, non-yuv444 need to be
+ * up (or down) sampled using the resizer.
+ *
+ * NOTE: The resizer needs to be enabled for YUV444 as well,
+ * even though there is no upsampling. This is most
+ * likely a bug in the hardware.
+ */
+ upsample = this->src.type != B2R2_FMT_TYPE_RASTER &&
+ is_yuv_fmt(this->src.fmt);
+ downsample = this->dst.type != B2R2_FMT_TYPE_RASTER &&
+ is_yuv_fmt(this->dst.fmt);
+
+ if (is_transform(req) || upsample || downsample)
+ ret = analyze_transform(this, req, &this->node_count,
+ &this->buf_count);
+ else
+ ret = analyze_copy(this, req, &this->node_count,
+ &this->buf_count);
+ }
+
+ if (ret == -ENOSYS) {
+ goto unsupported;
+ } else if (ret < 0) {
+ b2r2_log_warn(cont->dev, "%s: Analysis failed!\n", __func__);
+ goto error;
+ }
+
+ /* Setup the origin and movement of the destination window */
+ if (this->dst.hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ this->dst.dx = -this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x + this->dst.rect.width - 1;
+ } else {
+ this->dst.dx = this->dst.win.width;
+ this->dst.win.x = this->dst.rect.x;
+ }
+ if (this->dst.vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ this->dst.dy = -this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y + this->dst.rect.height - 1;
+ } else {
+ this->dst.dy = this->dst.win.height;
+ this->dst.win.y = this->dst.rect.y;
+ }
+
+ *buf_count = this->buf_count;
+ *node_count = this->node_count;
+
+ if (this->buf_count > 0)
+ *bufs = &this->work_bufs[0];
+
+ b2r2_log_info(cont->dev, "%s: dst.win=(%d, %d, %d, %d), "
+ "dst.dx=%d, dst.dy=%d\n", __func__, this->dst.win.x,
+ this->dst.win.y, this->dst.win.width, this->dst.win.height,
+ this->dst.dx, this->dst.dy);
+ if (this->buf_count > 0)
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, buf_size=%d, "
+ "node_count=%d\n", __func__, *buf_count,
+ bufs[0]->size, *node_count);
+ else
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, node_count=%d\n",
+ __func__, *buf_count, *node_count);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * b2r2_node_split_configure() - configures the node list
+ */
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node *node = first;
+
+ u32 x_pixels = 0;
+ u32 y_pixels = 0;
+
+ reset_nodes(node);
+
+ while (y_pixels < dst->rect.height) {
+ s32 dst_x = dst->win.x;
+ s32 dst_w = dst->win.width;
+
+ /* Clamp window height */
+ if (dst->win.height > dst->rect.height - y_pixels)
+ dst->win.height = dst->rect.height - y_pixels;
+
+ while (x_pixels < dst->rect.width) {
+
+ /* Clamp window width */
+ if (dst_w > dst->rect.width - x_pixels)
+ dst->win.width = dst->rect.width - x_pixels;
+
+ ret = configure_tile(cont, this, node, &node);
+ if (ret < 0)
+ goto error;
+
+ dst->win.x += dst->dx;
+ x_pixels += max(dst->dx, -dst->dx);
+ b2r2_log_info(cont->dev, "%s: x_pixels=%d\n",
+ __func__, x_pixels);
+ }
+
+ dst->win.y += dst->dy;
+ y_pixels += max(dst->dy, -dst->dy);
+
+ dst->win.x = dst_x;
+ dst->win.width = dst_w;
+ x_pixels = 0;
+
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * b2r2_node_split_assign_buffers() - assigns temporary buffers to the node list
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first,
+ struct b2r2_work_buf *bufs, u32 buf_count)
+{
+ struct b2r2_node *node = first;
+
+ while (node != NULL) {
+ /* The indices are offset by one */
+ if (node->dst_tmp_index) {
+ BUG_ON(node->dst_tmp_index > buf_count);
+
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as "
+ "dst\n", __func__, node->dst_tmp_index);
+
+ node->node.GROUP1.B2R2_TBA =
+ bufs[node->dst_tmp_index - 1].phys_addr;
+ }
+ if (node->src_tmp_index) {
+ u32 addr = bufs[node->src_tmp_index - 1].phys_addr;
+
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as src "
+ "%d ", __func__, node->src_tmp_index,
+ node->src_index);
+
+ BUG_ON(node->src_tmp_index > buf_count);
+
+ switch (node->src_index) {
+ case 1:
+ b2r2_log_info(cont->dev, "1\n");
+ node->node.GROUP3.B2R2_SBA = addr;
+ break;
+ case 2:
+ b2r2_log_info(cont->dev, "2\n");
+ node->node.GROUP4.B2R2_SBA = addr;
+ break;
+ case 3:
+ b2r2_log_info(cont->dev, "3\n");
+ node->node.GROUP5.B2R2_SBA = addr;
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s: tba=%p\tsba=%p\n", __func__,
+ (void *)node->node.GROUP1.B2R2_TBA,
+ (void *)node->node.GROUP4.B2R2_SBA);
+
+ node = node->next;
+ }
+
+ return 0;
+}
+
+/**
+ * b2r2_node_split_unassign_buffers() - releases temporary buffers
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
+{
+}
+
+/**
+ * b2r2_node_split_cancel() - cancels and releases a job instance
+ */
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
+{
+ memset(this, 0, sizeof(*this));
+}
+
+/*
+ * Private functions
+ */
+
+static void recalculate_rects(struct b2r2_control *cont,
+ struct b2r2_blt_req *req)
+{
+ struct b2r2_blt_rect new_dst_rect;
+ struct b2r2_blt_rect new_src_rect;
+ struct b2r2_blt_rect new_bg_rect;
+
+ b2r2_trim_rects(cont,
+ req, &new_bg_rect, &new_dst_rect, &new_src_rect);
+
+ req->dst_rect = new_dst_rect;
+ req->src_rect = new_src_rect;
+ req->bg_rect = new_bg_rect;
+}
+
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ const struct b2r2_blt_rect *clip)
+{
+ int ret;
+
+ s32 l, r, b, t;
+
+	/* Check rectangle dimensions */
+ if ((rect->width <= 0) || (rect->height <= 0)) {
+ b2r2_log_warn(cont->dev, "%s: Illegal rect (%d, %d, %d, %d)\n",
+ __func__, rect->x, rect->y, rect->width,
+ rect->height);
+ ret = -EINVAL;
+ goto error;
+ }
+
+	/*
+	 * If we are using clip we should only look at the intersection of
+	 * the rects
+	 */
+ if (clip) {
+ l = MAX(rect->x, clip->x);
+ t = MAX(rect->y, clip->y);
+ r = MIN(rect->x + rect->width, clip->x + clip->width);
+ b = MIN(rect->y + rect->height, clip->y + clip->height);
+ } else {
+ l = rect->x;
+ t = rect->y;
+ r = rect->x + rect->width;
+ b = rect->y + rect->height;
+ }
+
+	/* Check that the rect origin isn't outside the buffer */
+ if ((l < 0) || (t < 0) || (l >= img->width) || (t >= img->height)) {
+ b2r2_log_warn(cont->dev, "%s: rect origin outside buffer\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((r > img->width) || (b > img->height)) {
+ b2r2_log_warn(cont->dev, "%s: rect ends outside buffer\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+	/* Check that the intersected rectangle isn't empty */
+ if ((l == r) || (t == b)) {
+ b2r2_log_warn(cont->dev,
+ "%s: rect is empty (width or height zero)\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * bg_format_require_ivmx()
+ *
+ * Check if there are any color space conversion needed for the
+ * background to the destination format.
+ */
+static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt,
+ enum b2r2_blt_fmt dst_fmt)
+{
+ if (is_rgb_fmt(bg_fmt)) {
+ if (is_yvu_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yuv_fmt(dst_fmt))
+ return true;
+ else if (is_bgr_fmt(dst_fmt))
+ return true;
+ } else if (is_yvu_fmt(bg_fmt)) {
+ if (is_rgb_fmt(dst_fmt))
+ return true;
+ else if (is_bgr_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yuv_fmt(dst_fmt) &&
+ !is_yvu_fmt(dst_fmt))
+ return true;
+ } else if (bg_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ bg_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ bg_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ bg_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ if (is_rgb_fmt(dst_fmt)) {
+ return true;
+ } else if (is_yvu_fmt(dst_fmt)) {
+ return true;
+ } else if (is_yuv_fmt(dst_fmt)) {
+ switch (dst_fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ break;
+ default:
+ return true;
+ }
+ }
+ } else if (is_yuv_fmt(bg_fmt)) {
+ if (is_rgb_fmt(dst_fmt))
+ return true;
+ else if (is_bgr_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yvu_fmt(dst_fmt))
+ return true;
+ } else if (is_bgr_fmt(bg_fmt)) {
+ if (is_rgb_fmt(dst_fmt))
+ return true;
+ else if (is_yvu_fmt(dst_fmt))
+ return true;
+ else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ return true;
+ else if (is_yuv_fmt(dst_fmt))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * analyze_fmt_conv() - analyze the format conversions needed for a job
+ */
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 **vmx, u32 *node_count)
+{
+ if (is_rgb_fmt(src->fmt)) {
+ if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_yvu[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ *vmx = &vmx_rgb_to_blt_yuv888[0];
+ else if (is_yuv_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_yuv[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_bgr[0];
+ } else if (is_yvu_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_rgb[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_bgr[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ *vmx = &vmx_yvu_to_blt_yuv888[0];
+ else if (is_yuv_fmt(dst->fmt) &&
+ !is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_yuv[0];
+ } else if (src->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ src->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ src->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ src->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) {
+ /*
+ * (A)YUV/VUY(A) formats differ only in component
+ * order. This is handled by the endianness bit
+ * in B2R2_STY/TTY registers when src/target are set.
+ */
+ if (is_rgb_fmt(dst->fmt)) {
+ *vmx = &vmx_blt_yuv888_to_rgb[0];
+ } else if (is_yvu_fmt(dst->fmt)) {
+ *vmx = &vmx_blt_yuv888_to_yvu[0];
+ } else if (is_yuv_fmt(dst->fmt)) {
+ switch (dst->fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* do nothing */
+ break;
+ default:
+ *vmx = &vmx_blt_yuv888_to_yuv[0];
+ break;
+ }
+ }
+ } else if (is_yuv_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_yuv_to_rgb[0];
+ else if (is_bgr_fmt(dst->fmt))
+ *vmx = &vmx_yuv_to_bgr[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ *vmx = &vmx_yuv_to_blt_yuv888[0];
+ else if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_yvu_to_yuv[0];
+ } else if (is_bgr_fmt(src->fmt)) {
+ if (is_rgb_fmt(dst->fmt))
+ *vmx = &vmx_rgb_to_bgr[0];
+ else if (is_yvu_fmt(dst->fmt))
+ *vmx = &vmx_bgr_to_yvu[0];
+ else if (dst->fmt == B2R2_BLT_FMT_24_BIT_YUV888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 ||
+ dst->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ dst->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ BUG_ON(1);
+ else if (is_yuv_fmt(dst->fmt))
+ *vmx = &vmx_bgr_to_yuv[0];
+ }
+
+ if (dst->type == B2R2_FMT_TYPE_RASTER) {
+ *node_count = 1;
+ } else if (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR) {
+ *node_count = 2;
+ } else if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ *node_count = 3;
+ } else {
+ /* That's strange... */
+ BUG_ON(1);
+ }
+
+ return 0;
+}
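+
+/*
+ * The node counts above mirror the destination memory layout: raster
+ * targets are written in a single pass, semi-planar targets need an
+ * extra pass for the interleaved chroma plane, and planar targets need
+ * one pass per plane.
+ */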
+
+/**
+ * analyze_color_fill() - analyze a color fill operation
+ */
+static int analyze_color_fill(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+
+ /* Destination must be raster for raw fill to work */
+ if (this->dst.type != B2R2_FMT_TYPE_RASTER) {
+ b2r2_log_warn(cont->dev,
+ "%s: fill requires raster destination\n",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* We will try to fill the entire rectangle in one go */
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /* Check if this is a direct fill */
+ if ((!this->blend) && ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ARGB8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_AYUV8888) ||
+ (this->dst.fmt == B2R2_BLT_FMT_32_BIT_VUYA8888))) {
+ this->type = B2R2_DIRECT_FILL;
+
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+
+ /* The entire destination rectangle will be */
+ memcpy(&this->dst.win, &this->dst.rect,
+ sizeof(this->dst.win));
+ *node_count = 1;
+ } else {
+ this->type = B2R2_FILL;
+
+ /* Determine the fill color format */
+ if (this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) {
+ /* The color format will be the same as the dst fmt */
+ this->src.fmt = this->dst.fmt;
+ } else {
+ /* If the dst fmt is YUV the fill fmt will be as well */
+ if (is_yuv_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else if (is_rgb_fmt(this->dst.fmt)) {
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (is_bgr_fmt(this->dst.fmt)) {
+ /* Color will still be ARGB, we will translate
+ using IVMX (configured later) */
+ this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Illegal destination format for fill",
+ __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
+ /* Also, B2R2 seems to ignore the pixel alpha value */
+ if (((this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)
+ != 0) &&
+ ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)
+ == 0) && fmt_has_alpha(this->src.fmt)) {
+ u8 pixel_alpha = get_alpha(this->src.fmt,
+ this->src.color);
+ u32 new_global = pixel_alpha * this->global_alpha / 255;
+
+ this->global_alpha = (u8)new_global;
+
+ /* Set the pixel alpha to full opaque so we don't get
+ any nasty surprises */
+ this->src.color = set_alpha(this->src.fmt, 0xFF,
+ this->src.color);
+ }
+
+ ret = analyze_fmt_conv(
+ cont, &this->src, &this->dst, &this->ivmx,
+ node_count);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * analyze_transform() - analyze a transform operation (rescale, rotate, etc.)
+ */
+static int analyze_transform(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ bool is_scaling;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /*
+ * The transform enum is defined so that all rotation transforms are
+ * masked with the rotation flag
+ */
+ this->rotation = (this->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0;
+
+ /* B2R2 cannot do rotations if the destination is not raster, or 422R */
+ if (this->rotation && (this->dst.type != B2R2_FMT_TYPE_RASTER ||
+ this->dst.fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
+ this->dst.fmt == B2R2_BLT_FMT_CB_Y_CR_Y)) {
+ b2r2_log_warn(cont->dev,
+ "%s: Unsupported operation "
+ "(rot && (!dst_raster || dst==422R))",
+ __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Flip the image by changing the scan order of the destination */
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ this->dst.hso = B2R2_TY_HSO_RIGHT_TO_LEFT;
+ if (this->transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ /* Check for scaling */
+ if (this->rotation) {
+ is_scaling = (this->src.rect.width != this->dst.rect.height) ||
+ (this->src.rect.height != this->dst.rect.width);
+ } else {
+ is_scaling = (this->src.rect.width != this->dst.rect.width) ||
+ (this->src.rect.height != this->dst.rect.height);
+ }
+
+ /* Plane separated formats must be treated as scaling */
+ is_scaling = is_scaling ||
+ (this->src.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->src.type == B2R2_FMT_TYPE_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_SEMI_PLANAR) ||
+ (this->dst.type == B2R2_FMT_TYPE_PLANAR);
+
+ if (is_scaling && this->rotation && this->blend) {
+ /* TODO: This is unsupported. Fix it! */
+ b2r2_log_info(cont->dev, "%s: Unsupported operation "
+ "(rot+rescale+blend)\n", __func__);
+ ret = -ENOSYS;
+ goto unsupported;
+ }
+
+ /* Check which type of transform */
+ if (is_scaling && this->rotation) {
+ ret = analyze_rot_scale(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (is_scaling) {
+ ret = analyze_scaling(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else if (this->rotation) {
+ ret = analyze_rotate(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ } else {
+ /* No additional nodes needed for a flip */
+ ret = analyze_copy(this, req, node_count, buf_count);
+ if (ret < 0)
+ goto error;
+ this->type = B2R2_FLIP;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+unsupported:
+ return ret;
+}
+
+/**
+ * analyze_copy() - analyze a copy operation
+ */
+static int analyze_copy(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ if (!this->blend &&
+ !(this->flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
+ (this->src.fmt == this->dst.fmt) &&
+ (this->src.type == B2R2_FMT_TYPE_RASTER) &&
+ (this->dst.rect.x >= this->clip_rect.x) &&
+ (this->dst.rect.y >= this->clip_rect.y) &&
+ (this->dst.rect.x + this->dst.rect.width <=
+ this->clip_rect.x + this->clip_rect.width) &&
+ (this->dst.rect.y + this->dst.rect.height <=
+ this->clip_rect.y + this->clip_rect.height)) {
+ this->type = B2R2_DIRECT_COPY;
+ *node_count = 1;
+ } else {
+ u32 copy_count;
+
+ this->type = B2R2_COPY;
+
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst,
+ &this->ivmx, &copy_count);
+ if (ret < 0)
+ goto error;
+
+ *node_count = copy_count;
+ }
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+static int calc_rot_count(u32 width, u32 height)
+{
+ int count;
+
+ count = width / B2R2_ROTATE_MAX_WIDTH;
+ if (width % B2R2_ROTATE_MAX_WIDTH)
+ count++;
+ if (height > B2R2_ROTATE_MAX_WIDTH &&
+ height % B2R2_ROTATE_MAX_WIDTH)
+ count *= 2;
+
+ return count;
+}
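+
+/*
+ * Worked example, assuming B2R2_ROTATE_MAX_WIDTH is 16: for a 40x20
+ * tile, 40 / 16 = 2 stripes plus one for the 8 pixel remainder gives a
+ * count of 3; since the height 20 exceeds 16 and leaves a remainder,
+ * the count is doubled to 6.
+ */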
+
+static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ struct b2r2_control *cont = req->instance->control;
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ u32 num_rows;
+ u32 num_cols;
+ u32 rot_count;
+ u32 rescale_count;
+ u32 nodes_per_rot;
+ u32 nodes_per_rescale;
+ u32 right_width;
+ u32 bottom_height;
+ const u32 *dummy_vmx;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ /* Calculate the desired tmp buffer size */
+ tmp->win.width = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ tmp->win.width >>= 10;
+ tmp->win.width = min(tmp->win.width, dst->rect.height);
+ tmp->win.height = dst->rect.width;
+
+ setup_tmp_buf(cont, tmp, this->max_buf_size, dst->fmt, tmp->win.width,
+ tmp->win.height);
+ tmp->tmp_buf_index = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ tmp->win.width = tmp->rect.width;
+ tmp->win.height = tmp->rect.height;
+
+ tmp->dither = dst->dither;
+ dst->dither = 0;
+
+ /* Update the dst window with the actual tmp buffer dimensions */
+ dst->win.width = tmp->win.height;
+ dst->win.height = tmp->win.width;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ /*
+ * Calculate how many nodes are required to copy to and from the tmp
+ * buffer
+ */
+ ret = analyze_fmt_conv(cont, src, tmp, &this->ivmx, &nodes_per_rescale);
+ if (ret < 0)
+ goto error;
+
+ /* We will not do any format conversion in the rotation stage */
+ ret = analyze_fmt_conv(cont, tmp, dst, &dummy_vmx, &nodes_per_rot);
+ if (ret < 0)
+ goto error;
+
+ /* Calculate node count for the inner tiles */
+ num_cols = dst->rect.width / dst->win.width;
+ num_rows = dst->rect.height / dst->win.height;
+
+ rescale_count = num_cols * num_rows;
+ rot_count = calc_rot_count(dst->win.height, dst->win.width) *
+ num_cols * num_rows;
+
+ right_width = dst->rect.width % dst->win.width;
+ bottom_height = dst->rect.height % dst->win.height;
+
+ /* Calculate node count for the rightmost tiles */
+ if (right_width) {
+ u32 count = calc_rot_count(dst->win.height, right_width);
+
+ rot_count += count * num_rows;
+ rescale_count += num_rows;
+ b2r2_log_info(cont->dev, "%s: rightmost: %d nodes\n", __func__,
+ count*num_rows);
+ }
+
+ /* Calculate node count for the bottom tiles */
+ if (bottom_height) {
+ u32 count = calc_rot_count(bottom_height, dst->win.width);
+
+ rot_count += count * num_cols;
+ rescale_count += num_cols;
+ b2r2_log_info(cont->dev, "%s: bottom: %d nodes\n", __func__,
+ count * num_cols);
+
+ }
+
+ /* And finally for the bottom right corner */
+ if (right_width && bottom_height) {
+ u32 count = calc_rot_count(bottom_height, right_width);
+
+ rot_count += count;
+ rescale_count++;
+ b2r2_log_info(cont->dev, "%s: bottom right: %d nodes\n",
+ __func__, count);
+
+ }
+
+ *node_count = rot_count * nodes_per_rot;
+ *node_count += rescale_count * nodes_per_rescale;
+ *buf_count = 1;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+static int analyze_rot_scale_upscale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+	/* TODO: When upscaling we should optimally do the rotation first... */
+ return analyze_rot_scale_downscale(this, req, node_count, buf_count);
+}
+
+/**
+ * analyze_rot_scale() - analyzes a combined rotation and scaling op
+ */
+static int analyze_rot_scale(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ bool upscale;
+ struct b2r2_control *cont = req->instance->control;
+
+ ret = analyze_scale_factors(cont, this);
+ if (ret < 0)
+ goto error;
+
+ upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 20);
+
+ if (upscale)
+ ret = analyze_rot_scale_upscale(this, req, node_count,
+ buf_count);
+ else
+ ret = analyze_rot_scale_downscale(this, req, node_count,
+ buf_count);
+
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_SCALE_AND_ROTATE;
+
+ return 0;
+
+error:
+ return ret;
+}
+
+/**
+ * analyze_scaling() - analyze a rescale operation
+ */
+static int analyze_scaling(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ u32 copy_count;
+ u32 nbr_cols;
+ s32 dst_w;
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ ret = analyze_scale_factors(cont, this);
+ if (ret < 0)
+ goto error;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
+ &copy_count);
+ if (ret < 0)
+ goto error;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ /*
+ * We need to subtract from the actual maximum rescale width since the
+ * start of the stripe will be floored and the end ceiled. This could in
+ * some cases cause the stripe to be one pixel more than the maximum
+ * width.
+ *
+ * Example:
+ * x = 127.8, w = 127.8
+ *
+ * The stripe will touch pixels 127.8 through 255.6, i.e. 129 pixels.
+ */
+ dst_w = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ if (dst_w < (1 << 10))
+ dst_w = 1;
+ else
+ dst_w >>= 10;
+
+ b2r2_log_info(cont->dev, "%s: dst_w=%d dst.rect.width=%d\n",
+ __func__, dst_w, this->dst.rect.width);
+
+ this->dst.win.width = min(dst_w, this->dst.rect.width);
+
+ b2r2_log_info(cont->dev, "%s: dst.win.width=%d\n",
+ __func__, this->dst.win.width);
+
+ nbr_cols = this->dst.rect.width / this->dst.win.width;
+ if (this->dst.rect.width % this->dst.win.width)
+ nbr_cols++;
+
+ *node_count = copy_count * nbr_cols;
+
+ this->type = B2R2_SCALE;
+
+ b2r2_log_info(cont->dev, "%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * analyze_rotate() - analyze a rotate operation
+ */
+static int analyze_rotate(struct b2r2_node_split_job *this,
+ const struct b2r2_blt_request *req, u32 *node_count,
+ u32 *buf_count)
+{
+ int ret;
+ u32 nodes_per_tile;
+ struct b2r2_control *cont = req->instance->control;
+
+ /* Find out how many nodes a simple copy would require */
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
+ &nodes_per_tile);
+ if (ret < 0)
+ goto error;
+
+ this->type = B2R2_ROTATE;
+
+ /* The rotated stripes are written to the destination bottom-up */
+ if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM)
+ this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+ else
+ this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
+
+ memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
+
+ this->dst.win.height = min(this->dst.win.height, B2R2_ROTATE_MAX_WIDTH);
+
+ /*
+ * B2R2 cannot do rotations on stripes that are not a multiple of 16
+ * pixels high (if larger than 16 pixels).
+ */
+ if (this->dst.win.width > 16)
+ this->dst.win.width -= (this->dst.win.width % 16);
+
+ /* Blending cannot be combined with rotation */
+ if (this->blend) {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+ enum b2r2_blt_fmt tmp_fmt;
+
+ if (is_yuv_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ else if (is_bgr_fmt(this->dst.fmt))
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ else
+ tmp_fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+
+ setup_tmp_buf(cont, tmp, this->max_buf_size, tmp_fmt,
+ this->dst.win.width, this->dst.win.height);
+
+ tmp->tmp_buf_index = 1;
+
+ tmp->vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
+
+ this->dst.win.width = tmp->rect.width;
+ this->dst.win.height = tmp->rect.height;
+
+ memcpy(&tmp->win, &tmp->rect, sizeof(tmp->win));
+
+ *buf_count = 1;
+ this->work_bufs[0].size = tmp->pitch * tmp->height;
+
+ /*
+ * One more node per tile is required to rotate to the temp
+ * buffer.
+ */
+ nodes_per_tile++;
+ }
+
+ /* Finally, calculate the node count */
+ *node_count = nodes_per_tile *
+ calc_rot_count(this->src.rect.width, this->src.rect.height);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * analyze_scale_factors() - determines the scale factors for the op
+ */
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ u16 hsf;
+ u16 vsf;
+
+ if (this->rotation) {
+ ret = calculate_scale_factor(cont, this->src.rect.width,
+ this->dst.rect.height, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(cont, this->src.rect.height,
+ this->dst.rect.width, &vsf);
+ if (ret < 0)
+ goto error;
+ } else {
+ ret = calculate_scale_factor(cont, this->src.rect.width,
+ this->dst.rect.width, &hsf);
+ if (ret < 0)
+ goto error;
+
+ ret = calculate_scale_factor(cont, this->src.rect.height,
+ this->dst.rect.height, &vsf);
+ if (ret < 0)
+ goto error;
+ }
+
+ this->h_rescale = hsf != (1 << 10);
+ this->v_rescale = vsf != (1 << 10);
+
+ this->h_rsf = hsf;
+ this->v_rsf = vsf;
+
+ b2r2_log_info(cont->dev, "%s: h_rsf=%.4x\n", __func__, this->h_rsf);
+ b2r2_log_info(cont->dev, "%s: v_rsf=%.4x\n", __func__, this->v_rsf);
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
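+
+/*
+ * The scale factors are treated as 6.10 fixed point numbers, so 1 << 10
+ * (0x0400) is a 1:1 mapping; this is what the h_rescale/v_rescale checks
+ * above compare against. Assuming calculate_scale_factor() returns
+ * src/dst in this format, scaling a 200 pixel wide source onto a 100
+ * pixel wide destination gives hsf = (200 << 10) / 100 = 0x0800, i.e. 2.0.
+ */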
+
+/**
+ * configure_tile() - configures one tile of a blit operation
+ */
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+ int ret = 0;
+
+ struct b2r2_node *last;
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *bg = &this->bg;
+
+ struct b2r2_blt_rect dst_norm;
+ struct b2r2_blt_rect src_norm;
+ struct b2r2_blt_rect bg_norm;
+
+ /* Normalize the dest coords to the dest rect coordinate space */
+ dst_norm.x = dst->win.x - dst->rect.x;
+ dst_norm.y = dst->win.y - dst->rect.y;
+ dst_norm.width = dst->win.width;
+ dst_norm.height = dst->win.height;
+
+ if (dst->vso == B2R2_TY_VSO_BOTTOM_TO_TOP) {
+ /* The y coord should be counted from the bottom */
+ dst_norm.y = dst->rect.height - (dst_norm.y + 1);
+ }
+ if (dst->hso == B2R2_TY_HSO_RIGHT_TO_LEFT) {
+ /* The x coord should be counted from the right */
+ dst_norm.x = dst->rect.width - (dst_norm.x + 1);
+ }
+
+ /* If the destination is rotated we should swap x, y */
+ if (this->rotation) {
+ src_norm.x = dst_norm.y;
+ src_norm.y = dst_norm.x;
+ src_norm.width = dst_norm.height;
+ src_norm.height = dst_norm.width;
+ } else {
+ src_norm.x = dst_norm.x;
+ src_norm.y = dst_norm.y;
+ src_norm.width = dst_norm.width;
+ src_norm.height = dst_norm.height;
+ }
+
+ /* Convert to src coordinate space */
+ src->win.x = src_norm.x + src->rect.x;
+ src->win.y = src_norm.y + src->rect.y;
+ src->win.width = src_norm.width;
+ src->win.height = src_norm.height;
+
+ /* Set bg norm */
+ bg_norm.x = dst->win.x - dst->rect.x;
+ bg_norm.y = dst->win.y - dst->rect.y;
+ bg_norm.width = dst->win.width;
+ bg_norm.height = dst->win.height;
+
+ /* Convert to bg coordinate space */
+ bg->win.x = bg_norm.x + bg->rect.x;
+ bg->win.y = bg_norm.y + bg->rect.y;
+ bg->win.width = bg_norm.width;
+ bg->win.height = bg_norm.height;
+ bg->vso = dst->vso;
+ bg->hso = dst->hso;
+
+ /* Do the configuration depending on operation type */
+ switch (this->type) {
+ case B2R2_DIRECT_FILL:
+ configure_direct_fill(cont, node, this->src.color, dst, &last);
+ break;
+
+ case B2R2_DIRECT_COPY:
+ configure_direct_copy(cont, node, src, dst, &last);
+ break;
+
+ case B2R2_FILL:
+ ret = configure_fill(cont, node, src->color, src->fmt,
+ dst, this->ivmx, &last);
+ break;
+
+ case B2R2_FLIP: /* FLIP is just a copy with different VSO/HSO */
+ case B2R2_COPY:
+ ret = configure_copy(
+ cont, node, src, dst, this->ivmx, &last, this);
+ break;
+
+ case B2R2_ROTATE:
+ {
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ if (this->blend) {
+ b2r2_log_info(cont->dev, "%s: rotation + "
+ "blend\n", __func__);
+
+ tmp->win.x = 0;
+ tmp->win.y = tmp->win.height - 1;
+ tmp->win.width = dst->win.width;
+ tmp->win.height = dst->win.height;
+
+ /* Rotate to the temp buf */
+ ret = configure_rotate(cont, node, src, tmp,
+ this->ivmx, &node, NULL);
+ if (ret < 0)
+ goto error;
+
+ /* Then do a copy to the destination */
+ ret = configure_copy(cont, node, tmp, dst, NULL,
+ &last, this);
+ } else {
+ /* Just do a rotation */
+ ret = configure_rotate(cont, node, src, dst,
+ this->ivmx, &last, this);
+ }
+ }
+ break;
+
+ case B2R2_SCALE:
+ ret = configure_scale(cont, node, src, dst, this->h_rsf,
+ this->v_rsf, this->ivmx, &last, this);
+ break;
+
+ case B2R2_SCALE_AND_ROTATE:
+ ret = configure_rot_scale(cont, this, node, &last);
+ break;
+
+ default:
+ b2r2_log_warn(cont->dev, "%s: Unsupported request\n", __func__);
+ ret = -ENOSYS;
+ goto error;
+ break;
+
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Scale and rotate will configure its own blending and clipping */
+ if (this->type != B2R2_SCALE_AND_ROTATE) {
+
+ /* Configure blending and clipping */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (this->blend) {
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ configure_bg(cont, node, bg,
+ this->swap_fg_bg);
+ else
+ configure_bg(cont, node, dst,
+ this->swap_fg_bg);
+ configure_blend(cont, node, this->flags,
+ this->global_alpha);
+ }
+ if (this->clip)
+ configure_clip(cont, node, &this->clip_rect);
+
+ node = node->next;
+
+ } while (node != last);
+ }
+
+ /* Consume the nodes */
+ *next = last;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_sub_rot() - configures a sub-rotation
+ *
+ * This function configures a set of nodes for rotation using the destination
+ * window instead of the rectangle for calculating tiles.
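+ *
+ * The destination window is traversed in tiles of at most
+ * B2R2_ROTATE_MAX_WIDTH pixels per side. For instance, assuming a tile
+ * side of 16 pixels, a 100x40 destination window is covered by 7x3
+ * rotated tiles, with the edge tiles clamped by the min() calls below.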
+ */
+static int configure_sub_rot(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *job)
+{
+ int ret;
+
+ struct b2r2_blt_rect src_win;
+ struct b2r2_blt_rect dst_win;
+
+ u32 y_pixels = 0;
+ u32 x_pixels = 0;
+
+ memcpy(&src_win, &src->win, sizeof(src_win));
+ memcpy(&dst_win, &dst->win, sizeof(dst_win));
+
+ b2r2_log_info(cont->dev, "%s: src_win=(%d, %d, %d, %d) "
+ "dst_win=(%d, %d, %d, %d)\n", __func__,
+ src_win.x, src_win.y, src_win.width, src_win.height,
+ dst_win.x, dst_win.y, dst_win.width, dst_win.height);
+
+ dst->win.height = B2R2_ROTATE_MAX_WIDTH;
+ if (dst->win.width % B2R2_ROTATE_MAX_WIDTH)
+ dst->win.width -= dst->win.width % B2R2_ROTATE_MAX_WIDTH;
+
+ while (x_pixels < dst_win.width) {
+ u32 src_x = src->win.x;
+ u32 src_w = src->win.width;
+ u32 dst_y = dst->win.y;
+ u32 dst_h = dst->win.height;
+
+ dst->win.width = min(dst->win.width, dst_win.width -
+ (int)x_pixels);
+ src->win.height = dst->win.width;
+
+ b2r2_log_info(cont->dev, "%s: x_pixels=%d\n",
+ __func__, x_pixels);
+
+ while (y_pixels < dst_win.height) {
+ dst->win.height = min(dst->win.height,
+ dst_win.height - (int)y_pixels);
+ src->win.width = dst->win.height;
+
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
+
+ ret = configure_rotate(cont, node, src, dst,
+ ivmx, &node, job);
+ if (ret < 0)
+ goto error;
+
+ src->win.x += (src->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ src->win.width : -src->win.width;
+ dst->win.y += (dst->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ dst->win.height : -dst->win.height;
+
+ y_pixels += dst->win.height;
+ }
+
+ src->win.x = src_x;
+ src->win.y += (src->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ?
+ src->win.height : -src->win.height;
+ src->win.width = src_w;
+
+ dst->win.x += (dst->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ?
+ dst->win.width : -dst->win.width;
+ dst->win.y = dst_y;
+ dst->win.height = dst_h;
+
+ x_pixels += dst->win.width;
+ y_pixels = 0;
+
+ }
+
+ memcpy(&src->win, &src_win, sizeof(src->win));
+ memcpy(&dst->win, &dst_win, sizeof(dst->win));
+
+ *next = node;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_downscale() - configures a combined rotate and downscale
+ *
+ * When doing a downscale it is better to do the rotation last.
+ */
+static int configure_rot_downscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
+{
+ int ret;
+
+ struct b2r2_node_split_buf *src = &this->src;
+ struct b2r2_node_split_buf *dst = &this->dst;
+ struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
+
+ tmp->win.x = 0;
+ tmp->win.y = 0;
+ tmp->win.width = dst->win.height;
+ tmp->win.height = dst->win.width;
+
+ ret = configure_scale(cont, node, src, tmp, this->h_rsf, this->v_rsf,
+ this->ivmx, &node, this);
+ if (ret < 0)
+ goto error;
+
+ ret = configure_sub_rot(cont, node, tmp, dst, NULL, &node, this);
+ if (ret < 0)
+ goto error;
+
+ *next = node;
+
+ return 0;
+
+error:
+	b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rot_upscale() - configures a combined rotate and upscale
+ *
+ * When doing an upscale it is better to do the rotation first.
+ */
+static int configure_rot_upscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+	/* TODO: Implement an optimal upscale (rotation first) */
+ return configure_rot_downscale(cont, this, node, next);
+}
+
+/**
+ * configure_rot_scale() - configures a combined rotation and scaling op
+ */
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
+{
+ int ret;
+
+ bool upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 10);
+
+ if (upscale)
+ ret = configure_rot_upscale(cont, this, node, next);
+ else
+ ret = configure_rot_downscale(cont, this, node, next);
+
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_direct_fill() - configures the given node for direct fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL | B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_FILL;
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Source setup */
+
+ /* It seems B2R2 checks so that source and dest has the same format */
+ node->node.GROUP3.B2R2_STY = to_native_fmt(dst->fmt);
+ node->node.GROUP2.B2R2_S1CF = color;
+ node->node.GROUP2.B2R2_S2CF = 0;
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_direct_copy() - configures the node for direct copy
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * This operation will always consume one node only.
+ */
+static void configure_direct_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_COPY;
+
+ /* Source setup, use the base function to avoid altering the INS */
+ set_src(&node->node.GROUP3, src->addr, src);
+
+ /* Target setup */
+ set_target(node, dst->addr, dst);
+
+ /* Consume the node */
+ *next = node->next;
+}
+
+/**
+ * configure_fill() - configures the given node for color fill
+ *
+ * @node - the node to configure
+ * @color - the fill color
+ * @fmt - the source color format
+ * @dst - the destination buffer
+ * @next - the next empty node in the node list
+ *
+ * A normal fill operation can be combined with any other per pixel operations
+ * such as blend.
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next)
+{
+ int ret;
+ struct b2r2_node *last;
+
+ /* Configure the destination */
+ ret = configure_dst(cont, node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2 |
+ B2R2_CIC_COLOR_FILL;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+
+		/*
+		 * B2R2 has a bug that disables color fill from S2. As a
+		 * workaround we use S1 for the color.
+		 */
+ node->node.GROUP2.B2R2_S1CF = 0;
+ node->node.GROUP2.B2R2_S2CF = color;
+
+ /* TO BE REMOVED: */
+ set_src_2(node, dst->addr, dst);
+ node->node.GROUP4.B2R2_STY = to_native_fmt(fmt);
+
+ /* Setup the iVMX for color conversion */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+
+ if ((dst->type == B2R2_FMT_TYPE_PLANAR) ||
+ (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR)) {
+
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP8.B2R2_FCTL =
+ B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+ node->node.GROUP9.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP9.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+
+ node->node.GROUP10.B2R2_RSF =
+ (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) |
+ (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10));
+ node->node.GROUP10.B2R2_RZI =
+ B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT);
+ }
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_copy() - configures the given node for a copy operation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required to write to the
+ * destination format.
+ */
+static int configure_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_dst(cont, node, dst, ivmx, &last);
+ if (ret < 0)
+ goto error;
+
+ /* Configure the source for each node */
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ " Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
+ if (this != NULL &&
+ (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY)
+ != 0) {
+ u32 key_color = 0;
+
+ node->node.GROUP0.B2R2_ACK |=
+ B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT |
+ B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN |
+ B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
+
+ key_color = to_RGB888(this->flag_param, src->fmt);
+ node->node.GROUP12.B2R2_KEY1 = key_color;
+ node->node.GROUP12.B2R2_KEY2 = key_color;
+ }
+
+ if (this != NULL &&
+ (this->flags &
+ B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
+ struct b2r2_blt_request *request =
+ container_of(this, struct b2r2_blt_request,
+ node_split_job);
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
+ node->node.GROUP7.B2R2_CCO =
+ B2R2_CCO_CLUT_COLOR_CORRECTION |
+ B2R2_CCO_CLUT_UPDATE;
+ node->node.GROUP7.B2R2_CML = request->clut_phys_addr;
+ }
+ /* Configure the source(s) */
+ configure_src(cont, node, src, ivmx);
+
+ node = node->next;
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_rotate() - configures the given node for rotation
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ *
+ * This operation will consume as many nodes as are required by the combination
+ * of rotating and writing the destination format.
+ */
+static int configure_rotate(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+ do {
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
+
+ b2r2_log_debug(cont->dev, "%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "-----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_scale() - configures the given node for scaling
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @dst - the destination buffer
+ * @h_rsf - the horizontal rescale factor
+ * @v_rsf - the vertical rescale factor
+ * @ivmx - the iVMX to use for color conversion
+ * @next - the next empty node in the node list
+ */
+static int configure_scale(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src,
+ struct b2r2_node_split_buf *dst,
+ u16 h_rsf, u16 v_rsf,
+ const u32 *ivmx, struct b2r2_node **next,
+ struct b2r2_node_split_job *this)
+{
+ int ret;
+
+ struct b2r2_node *last;
+
+ struct b2r2_filter_spec *hf = NULL;
+ struct b2r2_filter_spec *vf = NULL;
+
+ u32 fctl = 0;
+ u32 rsf = 0;
+ u32 rzi = 0;
+ u32 hsrc_init = 0;
+ u32 vsrc_init = 0;
+ u32 hfp = 0;
+ u32 vfp = 0;
+
+ u16 luma_h_rsf = h_rsf;
+ u16 luma_v_rsf = v_rsf;
+
+ struct b2r2_filter_spec *luma_hf = NULL;
+ struct b2r2_filter_spec *luma_vf = NULL;
+
+ u32 luma_fctl = 0;
+ u32 luma_rsf = 0;
+ u32 luma_rzi = 0;
+ u32 luma_hsrc_init = 0;
+ u32 luma_vsrc_init = 0;
+ u32 luma_hfp = 0;
+ u32 luma_vfp = 0;
+
+ s32 src_x;
+ s32 src_y;
+ s32 src_w;
+ s32 src_h;
+
+ bool upsample;
+ bool downsample;
+
+ struct b2r2_blt_rect tmp_win = src->win;
+ bool src_raster = src->type == B2R2_FMT_TYPE_RASTER;
+ bool dst_raster = dst->type == B2R2_FMT_TYPE_RASTER;
+
+ /* Rescale the normalized source window */
+ src_x = inv_rescale(src->win.x - src->rect.x, luma_h_rsf);
+ src_y = inv_rescale(src->win.y - src->rect.y, luma_v_rsf);
+ src_w = inv_rescale(src->win.width, luma_h_rsf);
+ src_h = inv_rescale(src->win.height, luma_v_rsf);
+
+ /* Convert to src coordinate space */
+ src->win.x = (src_x >> 10) + src->rect.x;
+ src->win.y = (src_y >> 10) + src->rect.y;
+
+ /*
+ * Since the stripe might start and end on a fractional pixel
+ * we need to count all the touched pixels in the width.
+ *
+ * Example:
+ * src_x = 1.8, src_w = 2.8
+ *
+ * The stripe touches pixels 1.8 through 4.6, i.e. 4 pixels
+ */
+ src->win.width = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src->win.height = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ luma_hsrc_init = src_x & 0x3ff;
+ luma_vsrc_init = src_y & 0x3ff;
+
+ /* Check for upsampling of chroma */
+ upsample = !src_raster && !is_yuv444_fmt(src->fmt);
+ if (upsample) {
+ h_rsf /= 2;
+
+ if (is_yuv420_fmt(src->fmt))
+ v_rsf /= 2;
+ }
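+
+	/*
+	 * Example: reading YUV420 at a 1:1 luma scale (rsf == 1 << 10)
+	 * halves both chroma factors to 512, i.e. the half-resolution
+	 * chroma planes are upsampled by two in each direction.
+	 */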
+
+ /* Check for downsampling of chroma */
+ downsample = !dst_raster && !is_yuv444_fmt(dst->fmt);
+ if (downsample) {
+ h_rsf *= 2;
+
+ if (is_yuv420_fmt(dst->fmt))
+ v_rsf *= 2;
+ }
+
+ src_x = inv_rescale(tmp_win.x - src->rect.x, h_rsf);
+ src_y = inv_rescale(tmp_win.y - src->rect.y, v_rsf);
+ hsrc_init = src_x & 0x3ff;
+ vsrc_init = src_y & 0x3ff;
+
+ /* Configure resize and filters */
+ fctl = B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ luma_fctl = B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+
+ rsf = (h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+ luma_rsf = (luma_h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
+ (luma_v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
+
+ rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+ luma_rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (luma_hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (luma_vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+
+ /*
+ * We should only filter if there is an actual rescale (i.e. not when
+ * up or downsampling).
+ */
+ if (luma_h_rsf != (1 << 10)) {
+ hf = b2r2_filter_find(h_rsf);
+ luma_hf = b2r2_filter_find(luma_h_rsf);
+ }
+ if (luma_v_rsf != (1 << 10)) {
+ vf = b2r2_filter_find(v_rsf);
+ luma_vf = b2r2_filter_find(luma_v_rsf);
+ }
+
+ if (hf) {
+ fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ hfp = hf->h_coeffs_phys_addr;
+ }
+
+ if (vf) {
+ fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
+ vfp = vf->v_coeffs_phys_addr;
+ }
+
+ if (luma_hf) {
+ luma_fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
+ luma_hfp = luma_hf->h_coeffs_phys_addr;
+ }
+
+ if (luma_vf) {
+ luma_fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
+ luma_vfp = luma_vf->v_coeffs_phys_addr;
+ }
+
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
+ if (ret < 0)
+ goto error;
+
+	do {
+		bool chroma_rescale =
+			(h_rsf != (1 << 10)) || (v_rsf != (1 << 10));
+		bool luma_rescale =
+			(luma_h_rsf != (1 << 10)) ||
+			(luma_v_rsf != (1 << 10));
+		bool dst_chroma;
+		bool dst_luma;
+
+		if (node == NULL) {
+			b2r2_log_warn(cont->dev, "%s: Internal error! Out "
+				"of nodes!\n", __func__);
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		/* The node is known to be valid after the check above */
+		dst_chroma = node->node.GROUP1.B2R2_TTY &
+			B2R2_TTY_CHROMA_NOT_LUMA;
+		dst_luma = !dst_chroma;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
+
+ /*
+ * If the source format is anything other than raster, we
+ * always have to enable both chroma and luma resizers. This
+ * could be a bug in the hardware, since it is not mentioned in
+ * the specification.
+ *
+ * Otherwise, we will only enable the chroma resizer when
+ * writing chroma and the luma resizer when writing luma
+ * (or both when writing raster). Also, if there is no rescale
+ * to be done there's no point in using the resizers.
+ */
+
+ if (!src_raster || (chroma_rescale &&
+ (dst_raster || dst_chroma))) {
+ /* Enable chroma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+
+ node->node.GROUP9.B2R2_RSF = rsf;
+ node->node.GROUP9.B2R2_RZI = rzi;
+ node->node.GROUP9.B2R2_HFP = hfp;
+ node->node.GROUP9.B2R2_VFP = vfp;
+ }
+
+ if (!src_raster || (luma_rescale &&
+ (dst_raster || dst_luma))) {
+ /* Enable luma resize */
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_RESCALE2D_ENABLED;
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA;
+ node->node.GROUP8.B2R2_FCTL |= luma_fctl;
+
+ node->node.GROUP10.B2R2_RSF = luma_rsf;
+ node->node.GROUP10.B2R2_RZI = luma_rzi;
+ node->node.GROUP10.B2R2_HFP = luma_hfp;
+ node->node.GROUP10.B2R2_VFP = luma_vfp;
+ /*
+ * Scaling operation from raster to a multi-buffer
+ * format, requires the raster input to be scaled
+ * before luminance information can be extracted.
+ * Raster input is scaled by the chroma resizer.
+ * Luma resizer only handles luminance data which
+ * exists in a separate buffer in source image,
+ * as is the case with YUV planar/semi-planar formats.
+ */
+ if (src_raster) {
+ /* Activate chroma scaling */
+ node->node.GROUP0.B2R2_CIC |=
+ B2R2_CIC_RESIZE_CHROMA;
+ node->node.GROUP8.B2R2_FCTL |= fctl;
+ /*
+ * Color data must be scaled
+ * to the same size as luma.
+ * Use luma scaling parameters.
+ */
+ node->node.GROUP9.B2R2_RSF = luma_rsf;
+ node->node.GROUP9.B2R2_RZI = luma_rzi;
+ node->node.GROUP9.B2R2_HFP = luma_hfp;
+ node->node.GROUP9.B2R2_VFP = luma_vfp;
+ }
+ }
+
+ b2r2_log_info(cont->dev, "%s:\n"
+ "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
+ "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
+ "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
+ "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n"
+ "----------------------------------\n",
+ __func__, node->node.GROUP1.B2R2_TXY,
+ node->node.GROUP1.B2R2_TSZ,
+ node->node.GROUP3.B2R2_SXY,
+ node->node.GROUP3.B2R2_SSZ,
+ node->node.GROUP4.B2R2_SXY,
+ node->node.GROUP4.B2R2_SSZ,
+ node->node.GROUP5.B2R2_SXY,
+ node->node.GROUP5.B2R2_SSZ);
+
+ node = node->next;
+
+ } while (node != last);
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+/**
+ * configure_src() - configures the source registers and the iVMX
+ *
+ * @node - the node to configure
+ * @src - the source buffer
+ * @ivmx - the iVMX to use for color conversion
+ *
+ * This operation will not consume any nodes.
+ */
+static void configure_src(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *src, const u32 *ivmx)
+{
+ struct b2r2_node_split_buf tmp_buf;
+
+ b2r2_log_info(cont->dev,
+ "%s: src.win=(%d, %d, %d, %d)\n", __func__,
+ src->win.x, src->win.y, src->win.width,
+ src->win.height);
+
+ /* Configure S1 - S3 */
+ switch (src->type) {
+ case B2R2_FMT_TYPE_RASTER:
+ set_src_2(node, src->addr, src);
+ break;
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ /*
+ * For 420 and 422 the chroma has lower resolution than the
+ * luma
+ */
+ if (!is_yuv444_fmt(src->fmt)) {
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ if (is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src);
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ memcpy(&tmp_buf, src, sizeof(tmp_buf));
+
+ if (!is_yuv444_fmt(src->fmt)) {
+ /*
+ * Each chroma buffer will have half as many values
+ * per line as the luma buffer
+ */
+ tmp_buf.pitch = (tmp_buf.pitch + 1) / 2;
+
+ /* Horizontal resolution is half */
+ tmp_buf.win.x >>= 1;
+ tmp_buf.win.width = (tmp_buf.win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well
+ */
+ if (is_yuv420_fmt(src->fmt)) {
+ tmp_buf.win.height =
+ (tmp_buf.win.height + 1) / 2;
+ tmp_buf.win.y >>= 1;
+ }
+ }
+
+ set_src_3(node, src->addr, src); /* Y */
+ set_src_2(node, tmp_buf.chroma_addr, &tmp_buf); /* U */
+ set_src_1(node, tmp_buf.chroma_cr_addr, &tmp_buf); /* V */
+
+ break;
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ break;
+ }
+
+ /* Configure the iVMX for color space conversions */
+ if (ivmx != NULL)
+ set_ivmx(node, ivmx);
+}
+
+/**
+ * configure_bg() - configures a background for the given node
+ *
+ * @node - the node to configure
+ * @bg - the background buffer
+ * @swap_fg_bg - if true, fg will be on s1 instead of s2
+ *
+ * This operation will not consume any nodes.
+ *
+ * NOTE: This method should be called _AFTER_ the destination has been
+ * configured.
+ *
+ * WARNING: Take care when using this with semi-planar or planar sources since
+ * either S1 or S2 will be overwritten!
+ */
+static void configure_bg(struct b2r2_control *cont,
+ struct b2r2_node *node,
+ struct b2r2_node_split_buf *bg, bool swap_fg_bg)
+{
+ b2r2_log_info(cont->dev,
+ "%s: bg.win=(%d, %d, %d, %d)\n", __func__,
+ bg->win.x, bg->win.y, bg->win.width,
+ bg->win.height);
+
+ /* Configure S1 */
+ switch (bg->type) {
+ case B2R2_FMT_TYPE_RASTER:
+ if (swap_fg_bg) {
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_SWAP_FG_BG;
+
+ set_src(&node->node.GROUP4, bg->addr, bg);
+ } else {
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP3, bg->addr, bg);
+ }
+ break;
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ break;
+ }
+}
+
+/**
+ * configure_dst() - configures the destination registers of the given node
+ *
+ * @node - the node to configure
+ * @ivmx - the iVMX to use for color conversion
+ * @dst - the destination buffer
+ *
+ * This operation will consume as many nodes as are required to write the
+ * destination format.
+ */
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next)
+{
+ int ret;
+ int nbr_planes = 1;
+ int i;
+
+ struct b2r2_node_split_buf dst_planes[3];
+
+ b2r2_log_info(cont->dev,
+ "%s: dst.win=(%d, %d, %d, %d)\n", __func__,
+ dst->win.x, dst->win.y, dst->win.width,
+ dst->win.height);
+
+ memcpy(&dst_planes[0], dst, sizeof(dst_planes[0]));
+
+ if (dst->type != B2R2_FMT_TYPE_RASTER) {
+ /* There will be at least 2 planes */
+ nbr_planes = 2;
+
+ memcpy(&dst_planes[1], dst, sizeof(dst_planes[1]));
+
+ dst_planes[1].addr = dst->chroma_addr;
+ dst_planes[1].plane_selection = B2R2_TTY_CHROMA_NOT_LUMA;
+
+ if (!is_yuv444_fmt(dst->fmt)) {
+ /* Horizontal resolution is half */
+ dst_planes[1].win.x /= 2;
+			/*
+			 * Must round up the chroma size to handle cases when
+			 * luma size is not divisible by 2. E.g. luma width==7
+			 * requires chroma width==4; chroma width==7/2==3 is
+			 * only enough for luma width==6.
+			 */
+ dst_planes[1].win.width =
+ (dst_planes[1].win.width + 1) / 2;
+
+ /*
+ * If the buffer is in YUV420 format, the vertical
+ * resolution is half as well. Height must be rounded in
+ * the same way as is done for width.
+ */
+ if (is_yuv420_fmt(dst->fmt)) {
+ dst_planes[1].win.y /= 2;
+ dst_planes[1].win.height =
+ (dst_planes[1].win.height + 1) / 2;
+ }
+ }
+
+ if (dst->type == B2R2_FMT_TYPE_PLANAR) {
+ /* There will be a third plane as well */
+ nbr_planes = 3;
+
+ if (!is_yuv444_fmt(dst->fmt)) {
+ /* The chroma planes have half the luma pitch */
+ dst_planes[1].pitch /= 2;
+ }
+
+ memcpy(&dst_planes[2], &dst_planes[1],
+ sizeof(dst_planes[2]));
+ dst_planes[2].addr = dst->chroma_cr_addr;
+
+ /*
+ * The third plane will be Cr.
+ * The flag B2R2_TTY_CB_NOT_CR actually works
+ * the other way around, i.e. as if it was
+ * B2R2_TTY_CR_NOT_CB.
+ */
+ dst_planes[2].chroma_selection = B2R2_TTY_CB_NOT_CR;
+ }
+
+ }
+
+ /* Configure one node for each plane */
+ for (i = 0; i < nbr_planes; i++) {
+
+ if (node == NULL) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n", __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * When writing chroma, there's no need to read the luma and
+ * vice versa.
+ */
+ if ((node->node.GROUP3.B2R2_STY & B2R2_NATIVE_YUV) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ if (i != 1) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
+ }
+ if (i != 2) {
+ node->node.GROUP0.B2R2_INS &=
+ ~B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+ node->node.GROUP0.B2R2_INS |=
+ B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER;
+ }
+ } else if ((node->node.GROUP3.B2R2_STY &
+ (B2R2_NATIVE_YCBCR42X_MBN |
+ B2R2_NATIVE_YCBCR42X_R2B)) &&
+ (nbr_planes > 1)) {
+ if (i != 0) {
+ node->node.GROUP4.B2R2_STY |=
+ B2R2_S3TY_ENABLE_BLANK_ACCESS;
+ }
+ }
+
+ set_target(node, dst_planes[i].addr, &dst_planes[i]);
+
+ node = node->next;
+ }
+
+ /* Consume the nodes */
+ *next = node;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * configure_blend() - configures the given node for alpha blending
+ *
+ * @node - the node to configure
+ * @flags - the flags passed in the blt_request
+ * @global_alpha - the global alpha to use (if enabled in flags)
+ *
+ * This operation will not consume any nodes.
+ *
+ * NOTE: This method should be called _AFTER_ the destination has been
+ * configured.
+ *
+ * WARNING: Take care when using this with semi-planar or planar sources since
+ * either S1 or S2 will be overwritten!
+ */
+static void configure_blend(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 flags, u32 global_alpha)
+{
+ node->node.GROUP0.B2R2_ACK &= ~(B2R2_ACK_MODE_BYPASS_S2_S3);
+
+ /* Check if the foreground is premultiplied */
+ if ((flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT) != 0)
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_NOT_PREMULT;
+ else
+ node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_PREMULT;
+
+ /* Check if global alpha blend should be enabled */
+ if (flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
+
+ /* B2R2 expects the global alpha to be in 0...128 range */
+		global_alpha = (global_alpha * 128) / 255;
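+		/*
+		 * Worked example: 255 maps to (255 * 128) / 255 = 128
+		 * (fully opaque), 128 maps to 16384 / 255 = 64 and 0
+		 * stays 0 (fully transparent).
+		 */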
+
+ node->node.GROUP0.B2R2_ACK |=
+ global_alpha << B2R2_ACK_GALPHA_ROPID_SHIFT;
+ } else {
+ node->node.GROUP0.B2R2_ACK |=
+ (128 << B2R2_ACK_GALPHA_ROPID_SHIFT);
+ }
+}
+
+/**
+ * configure_clip() - configures destination clipping for the given node
+ *
+ * @node - the node to configure
+ * @clip_rect - the clip rectangle
+ *
+ * This operation does not consume any nodes.
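+ *
+ * Example: a clip rectangle of (x=10, y=20, width=100, height=50)
+ * programs an inclusive clip window from (10, 20) to (109, 69).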
+ */
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
+ struct b2r2_blt_rect *clip_rect)
+{
+ s32 l = clip_rect->x;
+ s32 r = clip_rect->x + clip_rect->width - 1;
+ s32 t = clip_rect->y;
+ s32 b = clip_rect->y + clip_rect->height - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+
+ /* Clip window setup */
+ node->node.GROUP6.B2R2_CWO =
+ ((t & 0x7FFF) << B2R2_CWO_Y_SHIFT) |
+ ((l & 0x7FFF) << B2R2_CWO_X_SHIFT);
+	node->node.GROUP6.B2R2_CWS =
+		((b & 0x7FFF) << B2R2_CWS_Y_SHIFT) |
+		((r & 0x7FFF) << B2R2_CWS_X_SHIFT);
+}
+
+/**
+ * set_buf() - configures the given buffer with the provided values
+ *
+ * @addr - the physical base address
+ * @img - the blt image to base the buffer on
+ * @rect - the rectangle to use
+ * @color_fill - determines whether the buffer should be used for color fill
+ * @color - the color to use in case of color fill
+ */
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr,
+ const struct b2r2_blt_img *img,
+ const struct b2r2_blt_rect *rect,
+ bool color_fill,
+ u32 color)
+{
+ memset(buf, 0, sizeof(*buf));
+
+ buf->fmt = img->fmt;
+ buf->type = get_fmt_type(img->fmt);
+
+ if (color_fill) {
+ buf->type = B2R2_FMT_TYPE_RASTER;
+ buf->color = color;
+ } else {
+ buf->addr = addr;
+
+ buf->alpha_range = get_alpha_range(img->fmt);
+
+ if (img->pitch == 0)
+ buf->pitch = fmt_byte_pitch(img->fmt, img->width);
+ else
+ buf->pitch = img->pitch;
+
+ buf->height = img->height;
+ buf->width = img->width;
+
+ switch (buf->type) {
+ case B2R2_FMT_TYPE_SEMI_PLANAR:
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ break;
+ case B2R2_FMT_TYPE_PLANAR:
+ if (is_yuv422_fmt(buf->fmt) ||
+ is_yuv420_fmt(buf->fmt)) {
+ buf->chroma_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ } else {
+ buf->chroma_cr_addr = (u32)(((u8 *)addr) +
+ buf->pitch * buf->height);
+ }
+ if (is_yuv420_fmt(buf->fmt)) {
+ /*
+ * Use ceil(height/2) in case
+ * buffer height is not divisible by 2.
+ */
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (is_yuv422_fmt(buf->fmt)) {
+ buf->chroma_cr_addr =
+ (u32)(((u8 *)buf->chroma_addr) +
+ (buf->pitch >> 1) * buf->height);
+ } else if (is_yvu420_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) *
+ ((buf->height + 1) >> 1));
+ } else if (is_yvu422_fmt(buf->fmt)) {
+ buf->chroma_addr =
+ (u32)(((u8 *)buf->chroma_cr_addr) +
+ (buf->pitch >> 1) * buf->height);
+ }
+ break;
+ default:
+ break;
+ }
+
+ memcpy(&buf->rect, rect, sizeof(buf->rect));
+ }
+}
+
+/**
+ * setup_tmp_buf() - configure a temporary buffer
+ */
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *tmp,
+ u32 max_size,
+ enum b2r2_blt_fmt pref_fmt,
+ u32 pref_width,
+ u32 pref_height)
+{
+ int ret;
+
+ enum b2r2_blt_fmt fmt;
+
+ u32 width;
+ u32 height;
+ u32 pitch;
+ u32 size;
+
+ /* Determine what format we should use for the tmp buf */
+ if (is_rgb_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
+ } else if (is_bgr_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_ABGR8888;
+ } else if (is_yvu_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_CB_Y_CR_Y;
+ } else if (is_yuv_fmt(pref_fmt)) {
+ fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
+ } else {
+ /* Wait, what? */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Cannot create tmp buf from this fmt (%d)\n",
+ __func__, pref_fmt);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* See if we can fit the entire preferred rectangle */
+ width = pref_width;
+ height = pref_height;
+ pitch = fmt_byte_pitch(fmt, width);
+ size = pitch * height;
+
+ if (size > max_size) {
+ /* We need to limit the size, so we choose a different width */
+ width = MIN(width, B2R2_RESCALE_MAX_WIDTH);
+ pitch = fmt_byte_pitch(fmt, width);
+ height = MIN(height, max_size / pitch);
+ size = pitch * height;
+ }
+
+ /* We should at least have enough room for one scanline */
+ if (height == 0) {
+ b2r2_log_warn(cont->dev, "%s: Not enough tmp mem!\n",
+ __func__);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memset(tmp, 0, sizeof(*tmp));
+
+ tmp->fmt = fmt;
+ tmp->type = B2R2_FMT_TYPE_RASTER;
+ tmp->height = height;
+ tmp->width = width;
+ tmp->pitch = pitch;
+
+ tmp->rect.width = width;
+ tmp->rect.height = tmp->height;
+ tmp->alpha_range = B2R2_TY_ALPHA_RANGE_255;
+
+ return 0;
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+
+}
+
+/**
+ * get_alpha_range() - returns the alpha range of the given format
+ */
+static enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */
+ default:
+ return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
+ }
+}
+
+/**
+ * get_alpha() - returns the pixel alpha in 0...255 range
+ */
+static u8 get_alpha(enum b2r2_blt_fmt fmt, u32 pixel)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ return (pixel >> 24) & 0xff;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return pixel & 0xff;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+		return (pixel >> 16) & 0xff;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return (((pixel >> 12) & 0xf) * 255) / 15;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return (pixel >> 15) * 255;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return pixel * 255;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return pixel;
+ default:
+ return 255;
+ }
+}
+
+/**
+ * set_alpha() - returns a color value with the alpha component set
+ */
+static u32 set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color)
+{
+ u32 alpha_mask;
+
+ switch (fmt) {
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ color &= 0x00ffffff;
+ alpha_mask = alpha << 24;
+ break;
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ color &= 0xffffff00;
+ alpha_mask = alpha;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ color &= 0x00ffff;
+ alpha_mask = alpha << 16;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ color &= 0x0fff;
+ alpha_mask = (alpha << 8) & 0xF000;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ color &= 0x7fff;
+		alpha_mask = (alpha / 255) << 15;
+ break;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ color = 0;
+ alpha_mask = (alpha / 255);
+ break;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ color = 0;
+ alpha_mask = alpha;
+ break;
+ default:
+ alpha_mask = 0;
+ }
+
+ return color | alpha_mask;
+}
+
+/**
+ * fmt_has_alpha() - returns whether the given format carries an alpha value
+ */
+static bool fmt_has_alpha(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_rgb_fmt() - returns whether the given format is a rgb format
+ */
+static bool is_rgb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_bgr_fmt() - returns whether the given format is a bgr format
+ */
+static bool is_bgr_fmt(enum b2r2_blt_fmt fmt)
+{
+ return (fmt == B2R2_BLT_FMT_32_BIT_ABGR8888);
+}
+
+/**
+ * is_yuv_fmt() - returns whether the given format is a yuv format
+ */
+static bool is_yuv_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yvu_fmt() - returns whether the given format is a yvu format
+ */
+static bool is_yvu_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yuv420_fmt() - returns whether the given format is a yuv420 format
+ */
+static bool is_yuv420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_yuv422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yvu420_fmt() - returns whether the given format is a yvu420 format
+ */
+static bool is_yvu420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_yvu422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_yuv444_fmt() - returns whether the given format is a yuv444 format
+ */
+static bool is_yuv444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * fmt_byte_pitch() - returns the byte pitch of a pixmap with the given width
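+ *
+ * For example, a 100 pixel wide RGB565 line occupies 200 bytes, while a
+ * 13 pixel wide A1 line needs two bytes (13 / 8 rounded up).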
+ */
+static int fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width)
+{
+ int pitch;
+
+ switch (fmt) {
+
+ case B2R2_BLT_FMT_1_BIT_A1:
+ pitch = width >> 3; /* Shift is faster than division */
+		if ((width & 0x7) != 0) /* Round up any remainder bits */
+ pitch++;
+ return pitch;
+
+ case B2R2_BLT_FMT_8_BIT_A8: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: /* Fall through */
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: /* Fall through */
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return width;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */
+ case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */
+ case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return width << 1;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_YUV888: /* Fall through */
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return width * 3;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_AYUV8888: /* Fall through */
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return width << 2;
+
+ default:
+ /* Should never, ever happen */
+ BUG_ON(1);
+ return 0;
+ }
+}
+
+/**
+ * to_native_fmt() - returns the native B2R2 format
+ */
+static enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_UNUSED:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return B2R2_NATIVE_A1;
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_NATIVE_A8;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ return B2R2_NATIVE_RGB565;
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ return B2R2_NATIVE_ARGB4444;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ return B2R2_NATIVE_ARGB1555;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ return B2R2_NATIVE_ARGB8565;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ return B2R2_NATIVE_RGB888;
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888: /* Not actually supported by HW */
+ return B2R2_NATIVE_YCBCR888;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ return B2R2_NATIVE_ARGB8888;
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888: /* Not actually supported by HW */
+ return B2R2_NATIVE_AYCBCR8888;
+	case B2R2_BLT_FMT_CB_Y_CR_Y:
+	case B2R2_BLT_FMT_Y_CB_Y_CR:
+		return B2R2_NATIVE_YCBCR422R;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return B2R2_NATIVE_YCBCR42X_R2B;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_NATIVE_YCBCR42X_MBN;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_NATIVE_YUV;
+ default:
+ /* Should never ever happen */
+ return B2R2_NATIVE_BYTE;
+ }
+}
+
+/**
+ * Bit-expand the color from fmt to RGB888 with blue at LSB.
+ * Copy MSBs into missing LSBs.
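+ *
+ * Example: pure red in RGB565, 0xf800, expands to 0xff0000; the five
+ * red bits land in bits 23:19 and their three MSBs are copied into
+ * bits 18:16.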
+ */
+static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt)
+{
+ u32 out_color = 0;
+ u32 r = 0;
+ u32 g = 0;
+ u32 b = 0;
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
+ g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
+ b = ((color & 0xf) << 4) | (color & 0xf);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
+ g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ out_color = color & 0xffffff;
+ break;
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ r = (color & 0xff) << 16;
+ g = color & 0xff00;
+ b = (color & 0xff0000) >> 16;
+ out_color = r | g | b;
+ break;
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
+ g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
+ b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
+ out_color = r | g | b;
+ break;
+ default:
+ break;
+ }
+
+ return out_color;
+}
+
+/**
+ * get_fmt_type() - returns the type of the given format (raster, planar, etc.)
+ */
+static enum b2r2_fmt_type get_fmt_type(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return B2R2_FMT_TYPE_RASTER;
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return B2R2_FMT_TYPE_PLANAR;
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return B2R2_FMT_TYPE_SEMI_PLANAR;
+ default:
+ return B2R2_FMT_TYPE_RASTER;
+ }
+}
+
+/**
+ * is_transform() - returns whether the given request is a transform operation
+ */
+static bool is_transform(const struct b2r2_blt_request *req)
+{
+ return (req->user_req.transform != B2R2_BLT_TRANSFORM_NONE) ||
+ (req->user_req.src_rect.width !=
+ req->user_req.dst_rect.width) ||
+ (req->user_req.src_rect.height !=
+ req->user_req.dst_rect.height);
+}
+
+/**
+ * rescale() - rescales the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format.
+ */
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf)
+{
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (sf == 0) {
+ b2r2_log_err(cont->dev, "%s: Scale factor is 0!\n", __func__);
+ BUG_ON(1);
+ }
+
+ /*
+ * This is normally not safe to do, since it drastically decreases the
+ * precision of the integer part of the dimension. But since the B2R2
+ * hardware only has 12-bit registers for these values, we are safe.
+ */
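+	/*
+	 * Example: dim == 100 and sf == 1 << 10 (1.0 in 6.10 fixed point)
+	 * gives (100 << 20) / 1024 == 102400, i.e. 100.0 in 22.10 format.
+	 */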
+ return (dim << 20) / sf;
+}
+
+/**
+ * inv_rescale() - does an inverted rescale of the given dimension
+ *
+ * Returns the rescaled dimension in 22.10 fixed point format.
+ */
+static s32 inv_rescale(s32 dim, u16 sf)
+{
+ if (sf == 0)
+ return dim;
+
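+	/*
+	 * Example: dim == 100 pixels and sf == 512 (0.5 in 6.10 fixed
+	 * point) gives 100 * 512 == 51200, i.e. 50.0 in 22.10 format.
+	 */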
+ return dim * sf;
+}
+
+/**
+ * set_target() - sets the target registers of the given node
+ */
+static void set_target(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ s32 l;
+ s32 r;
+ s32 t;
+ s32 b;
+
+ if (buf->tmp_buf_index)
+ node->dst_tmp_index = buf->tmp_buf_index;
+
+ node->node.GROUP1.B2R2_TBA = addr;
+ node->node.GROUP1.B2R2_TTY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->chroma_selection | buf->hso |
+ buf->vso | buf->dither | buf->plane_selection;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP1.B2R2_TTY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP1.B2R2_TSZ =
+ ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ node->node.GROUP1.B2R2_TXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Check if the rectangle is outside the buffer */
+ if (buf->vso == B2R2_TY_VSO_BOTTOM_TO_TOP)
+ t = buf->win.y - (buf->win.height - 1);
+ else
+ t = buf->win.y;
+
+ if (buf->hso == B2R2_TY_HSO_RIGHT_TO_LEFT)
+ l = buf->win.x - (buf->win.width - 1);
+ else
+ l = buf->win.x;
+
+ r = l + buf->win.width;
+ b = t + buf->win.height;
+
+ /* Clip to the destination buffer to prevent memory overwrites */
+ if ((l < 0) || (r > buf->width) || (t < 0) || (b > buf->height)) {
+ /* The clip rectangle is including the borders */
+ l = MAX(l, 0);
+ r = MIN(r, buf->width) - 1;
+ t = MAX(t, 0);
+ b = MIN(b, buf->height) - 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED;
+		node->node.GROUP6.B2R2_CWO =
+			((l & 0x7FFF) << B2R2_CWO_X_SHIFT) |
+			((t & 0x7FFF) << B2R2_CWO_Y_SHIFT);
+		node->node.GROUP6.B2R2_CWS =
+			((r & 0x7FFF) << B2R2_CWS_X_SHIFT) |
+			((b & 0x7FFF) << B2R2_CWS_Y_SHIFT);
+ }
+
+}
+
+/**
+ * set_src() - configures the given source register with the given values
+ */
+static void set_src(struct b2r2_src_config *src, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ src->B2R2_SBA = addr;
+ src->B2R2_STY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ src->B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ src->B2R2_SSZ = ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) |
+ ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT);
+ src->B2R2_SXY = ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+}
+
+/**
+ * set_src_1() - sets the source 1 registers of the given node
+ */
+static void set_src_1(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 1;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_FETCH_FROM_MEM;
+
+ node->node.GROUP3.B2R2_SBA = addr;
+ node->node.GROUP3.B2R2_STY = buf->pitch | to_native_fmt(buf->fmt) |
+ buf->alpha_range | buf->hso | buf->vso;
+
+ if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
+ buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
+ node->node.GROUP3.B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
+
+ node->node.GROUP3.B2R2_SXY =
+ ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) |
+ ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT);
+
+ /* Source 1 has no size register */
+}
+
+/**
+ * set_src_2() - sets the source 2 registers of the given node
+ */
+static void set_src_2(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 2;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP4, addr, buf);
+}
+
+/**
+ * set_src_3() - sets the source 3 registers of the given node
+ */
+static void set_src_3(struct b2r2_node *node, u32 addr,
+ struct b2r2_node_split_buf *buf)
+{
+ if (buf->tmp_buf_index)
+ node->src_tmp_index = buf->tmp_buf_index;
+
+ node->src_index = 3;
+
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_3;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
+
+ set_src(&node->node.GROUP5, addr, buf);
+}
+
+/**
+ * set_ivmx() - configures the iVMX registers with the given values
+ */
+static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values)
+{
+ node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX;
+ node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
+
+ node->node.GROUP15.B2R2_VMX0 = vmx_values[0];
+ node->node.GROUP15.B2R2_VMX1 = vmx_values[1];
+ node->node.GROUP15.B2R2_VMX2 = vmx_values[2];
+ node->node.GROUP15.B2R2_VMX3 = vmx_values[3];
+}
+
+/**
+ * reset_nodes() - clears the node list
+ */
+static void reset_nodes(struct b2r2_node *node)
+{
+ while (node != NULL) {
+ memset(&node->node, 0, sizeof(node->node));
+
+ node->src_tmp_index = 0;
+ node->dst_tmp_index = 0;
+
+ /* TODO: Implement support for short linked lists */
+ node->node.GROUP0.B2R2_CIC = 0x7ffff;
+
+ if (node->next != NULL)
+ node->node.GROUP0.B2R2_NIP =
+ node->next->physical_address;
+ node = node->next;
+ }
+}
+
+int b2r2_node_split_init(struct b2r2_control *cont)
+{
+ return 0;
+}
+
+void b2r2_node_split_exit(struct b2r2_control *cont)
+{
+
+}
diff --git a/drivers/video/b2r2/b2r2_node_split.h b/drivers/video/b2r2/b2r2_node_split.h
new file mode 100644
index 00000000000..a577241c31b
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_node_split.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 node splitter
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_NODE_SPLIT_H_
+#define __B2R2_NODE_SPLIT_H_
+
+#include "b2r2_internal.h"
+#include "b2r2_hw.h"
+
+/**
+ * b2r2_node_split_analyze() - Analyzes a B2R2 request
+ *
+ * @req - The request to analyze
+ * @max_buf_size - The largest size allowed for intermediate buffers
+ * @node_count - Number of nodes required for the job
+ * @buf_count - Number of intermediate buffers required for the job
+ * @bufs - An array of buffers needed for intermediate buffers
+ * @job - The job descriptor to fill in with the result of the analysis
+ *
+ * Analyzes the request and determines how many nodes and intermediate buffers
+ * are required.
+ *
+ * It is the responsibility of the caller to allocate memory and assign the
+ * physical addresses. After that b2r2_node_split_assign_buffers should be
+ * called to assign the buffers to the right nodes.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
+		u32 max_buf_size, u32 *node_count,
+		struct b2r2_work_buf **bufs, u32 *buf_count,
+		struct b2r2_node_split_job *job);
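+
+/*
+ * A rough sketch of the intended call sequence, as suggested by the
+ * comments in this header (the exact usage in the driver may differ):
+ *
+ *	b2r2_node_split_analyze(req, max_size, &node_count, &bufs,
+ *			&buf_count, &job);
+ *	// ...allocate the node list and the work buffers here...
+ *	b2r2_node_split_configure(cont, &job, first);
+ *	b2r2_node_split_assign_buffers(cont, &job, first, bufs, buf_count);
+ *	// ...run the job, then optionally reuse the node list...
+ *	b2r2_node_split_unassign_buffers(cont, &job, first);
+ *	b2r2_node_split_cancel(cont, &job);
+ */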
+
+/**
+ * b2r2_node_split_configure() - Performs a node split
+ *
+ * @cont - The B2R2 control block
+ * @job - The analyzed job to configure nodes from
+ * @first - The first node in the list of nodes to use
+ *
+ * Fills the supplied list of nodes with the parameters acquired by analyzing
+ * the request.
+ *
+ * All pointers to intermediate buffers are represented by integers to be used
+ * in the array returned by b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job, struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_assign_buffers() - Assigns physical addresses
+ *
+ * @cont - The B2R2 control block
+ * @job - The job the buffers belong to
+ * @first - The first node in the node list
+ * @bufs - Buffers with assigned physical addresses
+ * @buf_count - Number of physical addresses
+ *
+ * Assigns the physical addresses where intermediate buffers are required in
+ * the node list.
+ *
+ * The order of the elements of 'bufs' must be maintained from the call to
+ * b2r2_node_split_analyze.
+ *
+ * Returns:
+ * A negative value if an error occurred, 0 otherwise.
+ */
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
+ struct b2r2_node *first, struct b2r2_work_buf *bufs,
+ u32 buf_count);
+
+/**
+ * b2r2_node_split_unassign_buffers() - Removes all physical addresses
+ *
+ * @cont - The B2R2 control block
+ * @job - The job associated with the node list
+ * @first - The first node in the node list
+ *
+ * Removes all references to intermediate buffers from the node list.
+ *
+ * This makes it possible to reuse the node list with new buffers by calling
+ * b2r2_node_split_assign_buffers again. Useful for caching node lists.
+ */
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
+ struct b2r2_node *first);
+
+/**
+ * b2r2_node_split_cancel() - Releases all resources for a job
+ *
+ * @cont - The B2R2 control block
+ * @job - The job whose resources should be released
+ *
+ * Releases all resources associated with a job.
+ *
+ * This should always be called once b2r2_node_split_analyze has been called
+ * in order to release any resources allocated while analyzing.
+ */
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job);
+
+/**
+ * b2r2_node_split_init() - Initializes the node split module
+ *
+ * Initializes the node split module and creates debugfs files.
+ */
+int b2r2_node_split_init(struct b2r2_control *cont);
+
+/**
+ * b2r2_node_split_exit() - Deinitializes the node split module
+ *
+ * Releases all resources for the node split module.
+ */
+void b2r2_node_split_exit(struct b2r2_control *cont);
+
+#endif
diff --git a/drivers/video/b2r2/b2r2_profiler/Makefile b/drivers/video/b2r2/b2r2_profiler/Makefile
new file mode 100644
index 00000000000..69a85524fd7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/Makefile
@@ -0,0 +1,3 @@
+# Make file for loadable module B2R2 Profiler
+
+obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler.o
diff --git a/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
new file mode 100644
index 00000000000..e038941b4e8
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ *
+ * ST-Ericsson B2R2 profiler implementation
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+
+#include <video/b2r2_blt.h>
+#include "../b2r2_profiler_api.h"
+
+
+#define S32_MAX 2147483647
+
+
+static bool src_format_filter_on = false;
+module_param(src_format_filter_on, bool, S_IRUGO | S_IWUSR);
+static unsigned int src_format_filter;
+module_param(src_format_filter, uint, S_IRUGO | S_IWUSR);
+
+static bool print_blts_on = false;
+module_param(print_blts_on, bool, S_IRUGO | S_IWUSR);
+static bool use_mpix_per_second_in_print_blts = true;
+module_param(use_mpix_per_second_in_print_blts, bool, S_IRUGO | S_IWUSR);
+
+static bool profiler_stats_on = true;
+module_param(profiler_stats_on, bool, S_IRUGO | S_IWUSR);
+
+static const unsigned int profiler_stats_blts_used = 400;
+static struct {
+ unsigned long sampling_start_time_jiffies;
+
+ s32 min_mpix_per_second;
+ struct b2r2_blt_req min_blt_request;
+ struct b2r2_blt_profiling_info min_blt_profiling_info;
+
+ s32 max_mpix_per_second;
+ struct b2r2_blt_req max_blt_request;
+ struct b2r2_blt_profiling_info max_blt_profiling_info;
+
+ s32 accumulated_num_pixels;
+ s32 accumulated_num_usecs;
+
+ u32 num_blts_done;
+} profiler_stats;
+
+
+static s32 nsec_2_usec(const s32 nsec);
+
+static int is_scale_blt(const struct b2r2_blt_req * const request);
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request);
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs);
+static void print_profiler_stats(void);
+static void reset_profiler_stats(void);
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+static void blt_done(const struct b2r2_blt_req * const request,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+
+
+static struct b2r2_profiler this = {
+ .blt_done = blt_done,
+};
+
+
+static s32 nsec_2_usec(const s32 nsec)
+{
+ return nsec / 1000;
+}
+
+
+static int is_scale_blt(const struct b2r2_blt_req * const request)
+{
+ if (request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90)
+ return request->src_rect.width != request->dst_rect.height ||
+ request->src_rect.height != request->dst_rect.width;
+ else
+ return request->src_rect.width != request->dst_rect.width ||
+ request->src_rect.height != request->dst_rect.height;
+}
+
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ return get_mpix_per_second(get_num_pixels_in_blt(request),
+ nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2));
+}
+
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ char tmp_str[128];
+ snprintf(tmp_str, sizeof(tmp_str),
+ "SF: %#10x, DF: %#10x, F: %#10x, T: %#3x, S: %1i, P: %7i",
+ request->src_img.fmt,
+ request->dst_img.fmt,
+ request->flags,
+ request->transform,
+ is_scale_blt(request),
+ get_num_pixels_in_blt(request));
+ if (use_mpix_per_second_in_print_blts)
+ printk(KERN_ALERT "%s, MPix/s: %3i\n", tmp_str,
+ get_blt_mpix_per_second(request, blt_profiling_info));
+ else
+ printk(KERN_ALERT "%s, CPU: %10i, B2R2: %10i, Tot: %10i ns\n",
+ tmp_str, blt_profiling_info->nsec_active_in_cpu,
+ blt_profiling_info->nsec_active_in_b2r2,
+ blt_profiling_info->total_time_nsec);
+}
+
+
+static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request)
+{
+ s32 num_pixels_in_src = request->src_rect.width * request->src_rect.height;
+ s32 num_pixels_in_dst = request->dst_rect.width * request->dst_rect.height;
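+ /*
+ * Fills read no source pixels, so only the destination counts;
+ * otherwise the average of source and destination accounts for
+ * scaling blits.
+ */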
+ if (request->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW))
+ return num_pixels_in_dst;
+ else
+ return (num_pixels_in_src + num_pixels_in_dst) / 2;
+}
+
+static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs)
+{
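+ /*
+ * Scale both operands up as far as possible without overflowing
+ * s32, so that as little precision as possible is lost in the
+ * integer divisions below.
+ */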
+ s32 num_pixels_scale_factor = num_pixels != 0 ?
+ S32_MAX / num_pixels : S32_MAX;
+ s32 num_usecs_scale_factor = num_usecs != 0 ?
+ S32_MAX / num_usecs : S32_MAX;
+ s32 scale_factor = min(num_pixels_scale_factor, num_usecs_scale_factor);
+
+ s32 num_pixels_scaled = num_pixels * scale_factor;
+ s32 num_usecs_scaled = num_usecs * scale_factor;
+
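+ /*
+ * With num_usecs_scaled below 1000000 the divisor below would be
+ * zero; such blits are too small to measure, report 0 MPix/s.
+ */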
+ if (num_usecs_scaled < 1000000)
+ return 0;
+
+ return (num_pixels_scaled / 1000000) / (num_usecs_scaled / 1000000);
+}
+
+static void print_profiler_stats(void)
+{
+ printk(KERN_ALERT "Min: %3i, Avg: %3i, Max: %3i MPix/s\n",
+ profiler_stats.min_mpix_per_second,
+ get_mpix_per_second(
+ profiler_stats.accumulated_num_pixels,
+ profiler_stats.accumulated_num_usecs),
+ profiler_stats.max_mpix_per_second);
+ printk(KERN_ALERT "Min blit:\n");
+ print_blt(&profiler_stats.min_blt_request,
+ &profiler_stats.min_blt_profiling_info);
+ printk(KERN_ALERT "Max blit:\n");
+ print_blt(&profiler_stats.max_blt_request,
+ &profiler_stats.max_blt_profiling_info);
+}
+
+static void reset_profiler_stats(void)
+{
+ profiler_stats.sampling_start_time_jiffies = jiffies;
+ profiler_stats.min_mpix_per_second = S32_MAX;
+ profiler_stats.max_mpix_per_second = 0;
+ profiler_stats.accumulated_num_pixels = 0;
+ profiler_stats.accumulated_num_usecs = 0;
+ profiler_stats.num_blts_done = 0;
+}
+
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ s32 num_pixels_in_blt;
+ s32 num_usec_blt_took;
+ s32 blt_mpix_per_second;
+
+ if (time_before(jiffies, profiler_stats.sampling_start_time_jiffies))
+ return;
+
+ num_pixels_in_blt = get_num_pixels_in_blt(request);
+ num_usec_blt_took = nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2);
+ blt_mpix_per_second = get_mpix_per_second(num_pixels_in_blt,
+ num_usec_blt_took);
+
+ if (blt_mpix_per_second <=
+ profiler_stats.min_mpix_per_second) {
+ profiler_stats.min_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.min_blt_request,
+ request, sizeof(struct b2r2_blt_req));
+ memcpy(&profiler_stats.min_blt_profiling_info,
+ blt_profiling_info,
+ sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ if (blt_mpix_per_second >= profiler_stats.max_mpix_per_second) {
+ profiler_stats.max_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.max_blt_request, request,
+ sizeof(struct b2r2_blt_req));
+ memcpy(&profiler_stats.max_blt_profiling_info,
+ blt_profiling_info, sizeof(struct b2r2_blt_profiling_info));
+ }
+
+ profiler_stats.accumulated_num_pixels += num_pixels_in_blt;
+ profiler_stats.accumulated_num_usecs += num_usec_blt_took;
+ profiler_stats.num_blts_done++;
+
+ if (profiler_stats.num_blts_done >= profiler_stats_blts_used) {
+ print_profiler_stats();
+ reset_profiler_stats();
+ /*
+ * The printouts initiated above can disturb the next
+ * measurement, so we delay it two seconds to give the
+ * printouts a chance to finish.
+ */
+ profiler_stats.sampling_start_time_jiffies = jiffies + (2 * HZ);
+ }
+}
+
+static void blt_done(const struct b2r2_blt_req * const request,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
+{
+ /* Filters */
+ if (src_format_filter_on && request->src_img.fmt != src_format_filter)
+ return;
+
+ /* Processors */
+ if (print_blts_on)
+ print_blt(request, blt_profiling_info);
+
+ if (profiler_stats_on)
+ do_profiler_stats(request, blt_profiling_info);
+}
+
+
+static int __init b2r2_profiler_init(void)
+{
+ reset_profiler_stats();
+
+ return b2r2_register_profiler(&this);
+}
+module_init(b2r2_profiler_init);
+
+static void __exit b2r2_profiler_exit(void)
+{
+ b2r2_unregister_profiler(&this);
+}
+module_exit(b2r2_profiler_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Johan Mossberg (johan.xx.mossberg@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Profiler");
diff --git a/drivers/video/b2r2/b2r2_profiler_api.h b/drivers/video/b2r2/b2r2_profiler_api.h
new file mode 100644
index 00000000000..5f1f9abbe49
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_api.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiling API
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_API_H
+#define _LINUX_VIDEO_B2R2_PROFILER_API_H
+
+#include <video/b2r2_blt.h>
+
+/**
+ * struct b2r2_blt_profiling_info - Profiling information for a blit
+ *
+ * @nsec_active_in_cpu: The number of nanoseconds the job was active in the CPU.
+ * This is an approximate value, check out the code for more
+ * info.
+ * @nsec_active_in_b2r2: The number of nanoseconds the job was active in B2R2. This
+ * is an approximate value, check out the code for more info.
+ * @total_time_nsec: The total time the job took, in nanoseconds. Includes
+ * idle time.
+ */
+struct b2r2_blt_profiling_info {
+ s32 nsec_active_in_cpu;
+ s32 nsec_active_in_b2r2;
+ s32 total_time_nsec;
+};
+
+/**
+ * struct b2r2_profiler - B2R2 profiler.
+ *
+ * The callbacks are never run concurrently. The callbacks must not perform
+ * heavy work, as that might adversely affect the B2R2 driver, and they must
+ * not call the B2R2 profiler API, as this would cause a deadlock. If the
+ * callbacks call into the B2R2 driver, care must be taken as deadlock
+ * situations can arise.
+ *
+ * @blt_done: Called when a blit has finished, timed out or been canceled.
+ */
+struct b2r2_profiler {
+ void (*blt_done)(const struct b2r2_blt_req * const request,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+};
+
+/**
+ * b2r2_register_profiler() - Registers a profiler.
+ *
+ * Currently only one profiler can be registered at any given time.
+ *
+ * @profiler: The profiler
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler);
+
+/**
+ * b2r2_unregister_profiler() - Unregisters a profiler.
+ *
+ * @profiler: The profiler
+ */
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler);
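+
+/*
+ * A minimal registration sketch (illustrative only; my_blt_done is a
+ * hypothetical callback implemented by the profiler module):
+ *
+ * static struct b2r2_profiler my_profiler = {
+ * .blt_done = my_blt_done,
+ * };
+ *
+ * ret = b2r2_register_profiler(&my_profiler);
+ * ...
+ * b2r2_unregister_profiler(&my_profiler);
+ */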
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_API_H */
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.c b/drivers/video/b2r2/b2r2_profiler_socket.c
new file mode 100644
index 00000000000..ffa7f2870c8
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <asm/errno.h>
+
+#include "b2r2_profiler_api.h"
+#include "b2r2_internal.h"
+
+
+/*
+ * TODO: Call the profiler in a separate thread and have a circular buffer
+ * between the B2R2 driver and that thread. That way the profiler cannot slow
+ * down or kill the B2R2 driver. Seems a bit overkill right now as there is
+ * only one B2R2 profiler and we have full control over it but the situation
+ * may be different in the future.
+ */
+
+
+static const struct b2r2_profiler *b2r2_profiler;
+static DEFINE_SEMAPHORE(b2r2_profiler_lock);
+
+
+int b2r2_register_profiler(const struct b2r2_profiler * const profiler)
+{
+ int return_value;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0)
+ return return_value;
+
+ if (b2r2_profiler != NULL) {
+ return_value = -EUSERS;
+
+ goto cleanup;
+ }
+
+ b2r2_profiler = profiler;
+
+ return_value = 0;
+
+cleanup:
+ up(&b2r2_profiler_lock);
+
+ return return_value;
+}
+EXPORT_SYMBOL(b2r2_register_profiler);
+
+void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler)
+{
+ down(&b2r2_profiler_lock);
+
+ if (profiler == b2r2_profiler)
+ b2r2_profiler = NULL;
+
+ up(&b2r2_profiler_lock);
+}
+EXPORT_SYMBOL(b2r2_unregister_profiler);
+
+
+bool is_profiler_registered_approx(void)
+{
+ /* No locking by design, to make it fast, hence the approx */
+ return b2r2_profiler != NULL;
+}
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request)
+{
+ int return_value;
+ struct b2r2_blt_profiling_info blt_profiling_info;
+ struct b2r2_control *cont = request->instance->control;
+
+ return_value = down_interruptible(&b2r2_profiler_lock);
+ if (return_value != 0) {
+ dev_err(cont->dev,
+ "%s: Failed to acquire semaphore, ret=%i. "
+ "Lost profiler call!\n", __func__, return_value);
+
+ return;
+ }
+
+ if (NULL == b2r2_profiler)
+ goto cleanup;
+
+ blt_profiling_info.nsec_active_in_cpu = request->nsec_active_in_cpu;
+ blt_profiling_info.nsec_active_in_b2r2 = request->job.nsec_active_in_hw;
+ blt_profiling_info.total_time_nsec = request->total_time_nsec;
+
+ b2r2_profiler->blt_done(&request->user_req, request->request_id,
+ &blt_profiling_info);
+
+cleanup:
+ up(&b2r2_profiler_lock);
+}
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.h b/drivers/video/b2r2/b2r2_profiler_socket.h
new file mode 100644
index 00000000000..80b2c20293f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_profiler_socket.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 profiler socket communication
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+#define _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H
+
+#include "b2r2_internal.h"
+
+/* Will give a correct result most of the time but can be wrong */
+bool is_profiler_registered_approx(void);
+
+void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request);
+
+#endif /* _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H */
diff --git a/drivers/video/b2r2/b2r2_structures.h b/drivers/video/b2r2/b2r2_structures.h
new file mode 100644
index 00000000000..99fa7f047d3
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_structures.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 register struct
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+
+#ifndef __B2R2_STRUCTURES_H
+#define __B2R2_STRUCTURES_H
+
+/* C struct view */
+struct b2r2_memory_map {
+ unsigned char fill0[2304];
+ unsigned int BLT_SSBA17; /* @2304 */
+ unsigned int BLT_SSBA18; /* @2308 */
+ unsigned int BLT_SSBA19; /* @2312 */
+ unsigned int BLT_SSBA20; /* @2316 */
+ unsigned int BLT_SSBA21; /* @2320 */
+ unsigned int BLT_SSBA22; /* @2324 */
+ unsigned int BLT_SSBA23; /* @2328 */
+ unsigned int BLT_SSBA24; /* @2332 */
+ unsigned char fill1[32];
+ unsigned int BLT_STBA5; /* @2368 */
+ unsigned int BLT_STBA6; /* @2372 */
+ unsigned int BLT_STBA7; /* @2376 */
+ unsigned int BLT_STBA8; /* @2380 */
+ unsigned char fill2[176];
+ unsigned int BLT_CTL; /* @2560 */
+ unsigned int BLT_ITS; /* @2564 */
+ unsigned int BLT_STA1; /* @2568 */
+ unsigned char fill3[4];
+ unsigned int BLT_SSBA1; /* @2576 */
+ unsigned int BLT_SSBA2; /* @2580 */
+ unsigned int BLT_SSBA3; /* @2584 */
+ unsigned int BLT_SSBA4; /* @2588 */
+ unsigned int BLT_SSBA5; /* @2592 */
+ unsigned int BLT_SSBA6; /* @2596 */
+ unsigned int BLT_SSBA7; /* @2600 */
+ unsigned int BLT_SSBA8; /* @2604 */
+ unsigned int BLT_STBA1; /* @2608 */
+ unsigned int BLT_STBA2; /* @2612 */
+ unsigned int BLT_STBA3; /* @2616 */
+ unsigned int BLT_STBA4; /* @2620 */
+ unsigned int BLT_CQ1_TRIG_IP; /* @2624 */
+ unsigned int BLT_CQ1_TRIG_CTL; /* @2628 */
+ unsigned int BLT_CQ1_PACE_CTL; /* @2632 */
+ unsigned int BLT_CQ1_IP; /* @2636 */
+ unsigned int BLT_CQ2_TRIG_IP; /* @2640 */
+ unsigned int BLT_CQ2_TRIG_CTL; /* @2644 */
+ unsigned int BLT_CQ2_PACE_CTL; /* @2648 */
+ unsigned int BLT_CQ2_IP; /* @2652 */
+ unsigned int BLT_AQ1_CTL; /* @2656 */
+ unsigned int BLT_AQ1_IP; /* @2660 */
+ unsigned int BLT_AQ1_LNA; /* @2664 */
+ unsigned int BLT_AQ1_STA; /* @2668 */
+ unsigned int BLT_AQ2_CTL; /* @2672 */
+ unsigned int BLT_AQ2_IP; /* @2676 */
+ unsigned int BLT_AQ2_LNA; /* @2680 */
+ unsigned int BLT_AQ2_STA; /* @2684 */
+ unsigned int BLT_AQ3_CTL; /* @2688 */
+ unsigned int BLT_AQ3_IP; /* @2692 */
+ unsigned int BLT_AQ3_LNA; /* @2696 */
+ unsigned int BLT_AQ3_STA; /* @2700 */
+ unsigned int BLT_AQ4_CTL; /* @2704 */
+ unsigned int BLT_AQ4_IP; /* @2708 */
+ unsigned int BLT_AQ4_LNA; /* @2712 */
+ unsigned int BLT_AQ4_STA; /* @2716 */
+ unsigned int BLT_SSBA9; /* @2720 */
+ unsigned int BLT_SSBA10; /* @2724 */
+ unsigned int BLT_SSBA11; /* @2728 */
+ unsigned int BLT_SSBA12; /* @2732 */
+ unsigned int BLT_SSBA13; /* @2736 */
+ unsigned int BLT_SSBA14; /* @2740 */
+ unsigned int BLT_SSBA15; /* @2744 */
+ unsigned int BLT_SSBA16; /* @2748 */
+ unsigned int BLT_SGA1; /* @2752 */
+ unsigned int BLT_SGA2; /* @2756 */
+ unsigned char fill4[8];
+ unsigned int BLT_ITM0; /* @2768 */
+ unsigned int BLT_ITM1; /* @2772 */
+ unsigned int BLT_ITM2; /* @2776 */
+ unsigned int BLT_ITM3; /* @2780 */
+ unsigned char fill5[16];
+ unsigned int BLT_DFV2; /* @2800 */
+ unsigned int BLT_DFV1; /* @2804 */
+ unsigned int BLT_PRI; /* @2808 */
+ unsigned char fill6[8];
+ unsigned int PLUGS1_OP2; /* @2820 */
+ unsigned int PLUGS1_CHZ; /* @2824 */
+ unsigned int PLUGS1_MSZ; /* @2828 */
+ unsigned int PLUGS1_PGZ; /* @2832 */
+ unsigned char fill7[16];
+ unsigned int PLUGS2_OP2; /* @2852 */
+ unsigned int PLUGS2_CHZ; /* @2856 */
+ unsigned int PLUGS2_MSZ; /* @2860 */
+ unsigned int PLUGS2_PGZ; /* @2864 */
+ unsigned char fill8[16];
+ unsigned int PLUGS3_OP2; /* @2884 */
+ unsigned int PLUGS3_CHZ; /* @2888 */
+ unsigned int PLUGS3_MSZ; /* @2892 */
+ unsigned int PLUGS3_PGZ; /* @2896 */
+ unsigned char fill9[48];
+ unsigned int PLUGT_OP2; /* @2948 */
+ unsigned int PLUGT_CHZ; /* @2952 */
+ unsigned int PLUGT_MSZ; /* @2956 */
+ unsigned int PLUGT_PGZ; /* @2960 */
+ unsigned char fill10[108];
+ unsigned int BLT_NIP; /* @3072 */
+ unsigned int BLT_CIC; /* @3076 */
+ unsigned int BLT_INS; /* @3080 */
+ unsigned int BLT_ACK; /* @3084 */
+ unsigned int BLT_TBA; /* @3088 */
+ unsigned int BLT_TTY; /* @3092 */
+ unsigned int BLT_TXY; /* @3096 */
+ unsigned int BLT_TSZ; /* @3100 */
+ unsigned int BLT_S1CF; /* @3104 */
+ unsigned int BLT_S2CF; /* @3108 */
+ unsigned int BLT_S1BA; /* @3112 */
+ unsigned int BLT_S1TY; /* @3116 */
+ unsigned int BLT_S1XY; /* @3120 */
+ unsigned char fill11[4];
+ unsigned int BLT_S2BA; /* @3128 */
+ unsigned int BLT_S2TY; /* @3132 */
+ unsigned int BLT_S2XY; /* @3136 */
+ unsigned int BLT_S2SZ; /* @3140 */
+ unsigned int BLT_S3BA; /* @3144 */
+ unsigned int BLT_S3TY; /* @3148 */
+ unsigned int BLT_S3XY; /* @3152 */
+ unsigned int BLT_S3SZ; /* @3156 */
+ unsigned int BLT_CWO; /* @3160 */
+ unsigned int BLT_CWS; /* @3164 */
+ unsigned int BLT_CCO; /* @3168 */
+ unsigned int BLT_CML; /* @3172 */
+ unsigned int BLT_FCTL; /* @3176 */
+ unsigned int BLT_PMK; /* @3180 */
+ unsigned int BLT_RSF; /* @3184 */
+ unsigned int BLT_RZI; /* @3188 */
+ unsigned int BLT_HFP; /* @3192 */
+ unsigned int BLT_VFP; /* @3196 */
+ unsigned int BLT_Y_RSF; /* @3200 */
+ unsigned int BLT_Y_RZI; /* @3204 */
+ unsigned int BLT_Y_HFP; /* @3208 */
+ unsigned int BLT_Y_VFP; /* @3212 */
+ unsigned char fill12[16];
+ unsigned int BLT_KEY1; /* @3232 */
+ unsigned int BLT_KEY2; /* @3236 */
+ unsigned char fill13[8];
+ unsigned int BLT_SAR; /* @3248 */
+ unsigned int BLT_USR; /* @3252 */
+ unsigned char fill14[8];
+ unsigned int BLT_IVMX0; /* @3264 */
+ unsigned int BLT_IVMX1; /* @3268 */
+ unsigned int BLT_IVMX2; /* @3272 */
+ unsigned int BLT_IVMX3; /* @3276 */
+ unsigned int BLT_OVMX0; /* @3280 */
+ unsigned int BLT_OVMX1; /* @3284 */
+ unsigned int BLT_OVMX2; /* @3288 */
+ unsigned int BLT_OVMX3; /* @3292 */
+ unsigned char fill15[8];
+ unsigned int BLT_VC1R; /* @3304 */
+ unsigned char fill16[20];
+ unsigned int BLT_Y_HFC0; /* @3328 */
+ unsigned int BLT_Y_HFC1; /* @3332 */
+ unsigned int BLT_Y_HFC2; /* @3336 */
+ unsigned int BLT_Y_HFC3; /* @3340 */
+ unsigned int BLT_Y_HFC4; /* @3344 */
+ unsigned int BLT_Y_HFC5; /* @3348 */
+ unsigned int BLT_Y_HFC6; /* @3352 */
+ unsigned int BLT_Y_HFC7; /* @3356 */
+ unsigned int BLT_Y_HFC8; /* @3360 */
+ unsigned int BLT_Y_HFC9; /* @3364 */
+ unsigned int BLT_Y_HFC10; /* @3368 */
+ unsigned int BLT_Y_HFC11; /* @3372 */
+ unsigned int BLT_Y_HFC12; /* @3376 */
+ unsigned int BLT_Y_HFC13; /* @3380 */
+ unsigned int BLT_Y_HFC14; /* @3384 */
+ unsigned int BLT_Y_HFC15; /* @3388 */
+ unsigned char fill17[80];
+ unsigned int BLT_Y_VFC0; /* @3472 */
+ unsigned int BLT_Y_VFC1; /* @3476 */
+ unsigned int BLT_Y_VFC2; /* @3480 */
+ unsigned int BLT_Y_VFC3; /* @3484 */
+ unsigned int BLT_Y_VFC4; /* @3488 */
+ unsigned int BLT_Y_VFC5; /* @3492 */
+ unsigned int BLT_Y_VFC6; /* @3496 */
+ unsigned int BLT_Y_VFC7; /* @3500 */
+ unsigned int BLT_Y_VFC8; /* @3504 */
+ unsigned int BLT_Y_VFC9; /* @3508 */
+ unsigned char fill18[72];
+ unsigned int BLT_HFC0; /* @3584 */
+ unsigned int BLT_HFC1; /* @3588 */
+ unsigned int BLT_HFC2; /* @3592 */
+ unsigned int BLT_HFC3; /* @3596 */
+ unsigned int BLT_HFC4; /* @3600 */
+ unsigned int BLT_HFC5; /* @3604 */
+ unsigned int BLT_HFC6; /* @3608 */
+ unsigned int BLT_HFC7; /* @3612 */
+ unsigned int BLT_HFC8; /* @3616 */
+ unsigned int BLT_HFC9; /* @3620 */
+ unsigned int BLT_HFC10; /* @3624 */
+ unsigned int BLT_HFC11; /* @3628 */
+ unsigned int BLT_HFC12; /* @3632 */
+ unsigned int BLT_HFC13; /* @3636 */
+ unsigned int BLT_HFC14; /* @3640 */
+ unsigned int BLT_HFC15; /* @3644 */
+ unsigned char fill19[80];
+ unsigned int BLT_VFC0; /* @3728 */
+ unsigned int BLT_VFC1; /* @3732 */
+ unsigned int BLT_VFC2; /* @3736 */
+ unsigned int BLT_VFC3; /* @3740 */
+ unsigned int BLT_VFC4; /* @3744 */
+ unsigned int BLT_VFC5; /* @3748 */
+ unsigned int BLT_VFC6; /* @3752 */
+ unsigned int BLT_VFC7; /* @3756 */
+ unsigned int BLT_VFC8; /* @3760 */
+ unsigned int BLT_VFC9; /* @3764 */
+};
+
+#endif /* !defined(__B2R2_STRUCTURES_H) */
+
diff --git a/drivers/video/b2r2/b2r2_timing.c b/drivers/video/b2r2/b2r2_timing.c
new file mode 100644
index 00000000000..4f3e2b8b042
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/time.h>
+
+
+u32 b2r2_get_curr_nsec(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+
+ return (u32)timespec_to_ns(&ts);
+}
diff --git a/drivers/video/b2r2/b2r2_timing.h b/drivers/video/b2r2/b2r2_timing.h
new file mode 100644
index 00000000000..e87113c0ec9
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_timing.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 timing
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_
+
+/**
+ * b2r2_get_curr_nsec() - Returns the current time in nanoseconds. Notice
+ * that the value wraps when the u32 limit is reached.
+ */
+u32 b2r2_get_curr_nsec(void);
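+
+/*
+ * Since the value wraps, durations should be computed with unsigned
+ * subtraction, which stays correct across a single wrap (illustrative
+ * sketch only):
+ *
+ * u32 start = b2r2_get_curr_nsec();
+ * ...
+ * u32 elapsed = b2r2_get_curr_nsec() - start;
+ */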
+
+#endif /* _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_ */
diff --git a/drivers/video/b2r2/b2r2_utils.c b/drivers/video/b2r2/b2r2_utils.c
new file mode 100644
index 00000000000..3df7a272211
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.c
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_utils.h"
+#include "b2r2_debug.h"
+#include "b2r2_internal.h"
+
+const s32 b2r2_s32_max = 2147483647;
+
+
+/**
+ * calculate_scale_factor() - calculates the scale factor between the given
+ * values as a 6.10 fixed point number, e.g. from=100 and to=50
+ * yields 0x800 (2.0)
+ */
+int calculate_scale_factor(struct b2r2_control *cont,
+ u32 from, u32 to, u16 *sf_out)
+{
+ int ret;
+ u32 sf;
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
+
+ if (to == from) {
+ *sf_out = 1 << 10;
+ return 0;
+ } else if (to == 0) {
+ b2r2_log_err(cont->dev, "%s: To is 0!\n", __func__);
+ BUG();
+ }
+
+ sf = (from << 10) / to;
+
+ if ((sf & 0xffff0000) != 0) {
+ /* Overflow error */
+ b2r2_log_warn(cont->dev, "%s: "
+ "Scale factor too large\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ } else if (sf == 0) {
+ b2r2_log_warn(cont->dev, "%s: "
+ "Scale factor too small\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ *sf_out = (u16)sf;
+
+ b2r2_log_info(cont->dev, "%s exit\n", __func__);
+
+ return 0;
+
+error:
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
+ return ret;
+}
+
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect)
+{
+ bounding_rect->x = 0;
+ bounding_rect->y = 0;
+ bounding_rect->width = img->width;
+ bounding_rect->height = img->height;
+}
+
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect)
+{
+ return rect->width == 0 || rect->height == 0;
+}
+
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2)
+{
+ return rect1->x >= rect2->x &&
+ rect1->y >= rect2->y &&
+ rect1->x + rect1->width <= rect2->x + rect2->width &&
+ rect1->y + rect1->height <= rect2->y + rect2->height;
+}
+
+bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2)
+{
+ return rect1->width >= rect2->width &&
+ rect1->height >= rect2->height;
+}
+
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2, struct b2r2_blt_rect *intersection)
+{
+ struct b2r2_blt_rect tmp_rect;
+
+ tmp_rect.x = max(rect1->x, rect2->x);
+ tmp_rect.y = max(rect1->y, rect2->y);
+ tmp_rect.width = min(rect1->x + rect1->width, rect2->x + rect2->width)
+ - tmp_rect.x;
+ if (tmp_rect.width < 0)
+ tmp_rect.width = 0;
+ tmp_rect.height =
+ min(rect1->y + rect1->height, rect2->y + rect2->height) -
+ tmp_rect.y;
+ if (tmp_rect.height < 0)
+ tmp_rect.height = 0;
+
+ *intersection = tmp_rect;
+}
+
+/*
+ * Calculate new rectangles for the supplied request so that clipping to
+ * the destination image can be avoided. Essentially, the new destination
+ * rectangle is defined inside the old one. Given the transform and
+ * scaling, one has to calculate which part of the old source rectangle
+ * corresponds to the new part of the old destination rectangle.
+ */
+void b2r2_trim_rects(struct b2r2_control *cont,
+ const struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *new_bg_rect,
+ struct b2r2_blt_rect *new_dst_rect,
+ struct b2r2_blt_rect *new_src_rect)
+{
+ enum b2r2_blt_transform transform = req->transform;
+ struct b2r2_blt_rect *old_src_rect =
+ (struct b2r2_blt_rect *) &req->src_rect;
+ struct b2r2_blt_rect *old_dst_rect =
+ (struct b2r2_blt_rect *) &req->dst_rect;
+ struct b2r2_blt_rect *old_bg_rect =
+ (struct b2r2_blt_rect *) &req->bg_rect;
+ struct b2r2_blt_rect dst_img_bounds;
+ s32 src_x = 0;
+ s32 src_y = 0;
+ s32 src_w = 0;
+ s32 src_h = 0;
+ s32 dx = 0;
+ s32 dy = 0;
+ u16 hsf;
+ u16 vsf;
+
+ b2r2_log_info(cont->dev,
+ "%s\nold_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ old_dst_rect->x, old_dst_rect->y,
+ old_dst_rect->width, old_dst_rect->height);
+ b2r2_log_info(cont->dev,
+ "%s\nold_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ old_src_rect->x, old_src_rect->y,
+ old_src_rect->width, old_src_rect->height);
+
+ b2r2_get_img_bounding_rect((struct b2r2_blt_img *) &req->dst_img,
+ &dst_img_bounds);
+
+ /* dst_rect inside dst_img, no clipping necessary */
+ if (b2r2_is_rect_inside_rect(old_dst_rect, &dst_img_bounds))
+ goto keep_rects;
+
+ b2r2_intersect_rects(old_dst_rect, &dst_img_bounds, new_dst_rect);
+ b2r2_log_info(cont->dev,
+ "%s\nnew_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ new_dst_rect->x, new_dst_rect->y,
+ new_dst_rect->width, new_dst_rect->height);
+
+ /* dst_rect completely outside, leave it to validation */
+ if (new_dst_rect->width == 0 || new_dst_rect->height == 0)
+ goto keep_rects;
+
+ dx = new_dst_rect->x - old_dst_rect->x;
+ dy = new_dst_rect->y - old_dst_rect->y;
+
+ if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
+ int res = 0;
+ res = calculate_scale_factor(cont, old_src_rect->width,
+ old_dst_rect->height, &hsf);
+ /* invalid dimensions, leave them to validation */
+ if (res < 0)
+ goto keep_rects;
+
+ res = calculate_scale_factor(cont, old_src_rect->height,
+ old_dst_rect->width, &vsf);
+ if (res < 0)
+ goto keep_rects;
+
+ /*
+ * After applying the inverse transform
+ * for 90 degree rotation, the top-left corner
+ * becomes top-right.
+ * src_rect origin is defined as top-left,
+ * so a translation between dst and src
+ * coordinate spaces is necessary.
+ */
+ src_x = (old_src_rect->width << 10) -
+ hsf * (dy + new_dst_rect->height);
+ src_y = dx * vsf;
+ src_w = new_dst_rect->height * hsf;
+ src_h = new_dst_rect->width * vsf;
+ } else {
+ int res = 0;
+ res = calculate_scale_factor(cont, old_src_rect->width,
+ old_dst_rect->width, &hsf);
+ if (res < 0)
+ goto keep_rects;
+
+ res = calculate_scale_factor(cont, old_src_rect->height,
+ old_dst_rect->height, &vsf);
+ if (res < 0)
+ goto keep_rects;
+
+ src_x = dx * hsf;
+ src_y = dy * vsf;
+ src_w = new_dst_rect->width * hsf;
+ src_h = new_dst_rect->height * vsf;
+ }
+
+ /*
+ * src_w must contain all the pixels that contribute
+ * to a particular destination rectangle.
+ * ((x + 0x3ff) >> 10) is equivalent to ceiling(x),
+ * expressed in 6.10 fixed point format.
+ * Every destination rectangle, maps to a certain area in the source
+ * rectangle. The area in source will most likely not be a rectangle
+ * with exact integer dimensions whenever arbitrary scaling is involved.
+ * Consider the following example.
+ * Suppose, that width of the current destination rectangle maps
+ * to 1.7 pixels in source, starting at x == 5.4, as calculated
+ * using the scaling factor.
+ * This means that while the destination rectangle is written,
+ * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1
+ * Consequently, color from 3 pixels (x == 5, 6 and 7)
+ * needs to be read from source.
+ * The formula below the comment yields:
+ * ceil(0.4 + 1.7) == ceil(2.1) == 3
+ * (src_x & 0x3ff) is the fractional part of src_x,
+ * which is expressed in 6.10 fixed point format.
+ * Thus, width of the source area should be 3 pixels wide,
+ * starting at x == 5.
+ */
+ src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10;
+ src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10;
+
+ src_x >>= 10;
+ src_y >>= 10;
+
+ if (transform & B2R2_BLT_TRANSFORM_FLIP_H)
+ src_x = old_src_rect->width - src_x - src_w;
+
+ if (transform & B2R2_BLT_TRANSFORM_FLIP_V)
+ src_y = old_src_rect->height - src_y - src_h;
+
+ /*
+ * Translate the src_rect coordinates into true
+ * src_buffer coordinates.
+ */
+ src_x += old_src_rect->x;
+ src_y += old_src_rect->y;
+
+ new_src_rect->x = src_x;
+ new_src_rect->y = src_y;
+ new_src_rect->width = src_w;
+ new_src_rect->height = src_h;
+
+ b2r2_log_info(cont->dev,
+ "%s\nnew_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ new_src_rect->x, new_src_rect->y,
+ new_src_rect->width, new_src_rect->height);
+
+ if (req->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ /* Modify bg_rect in the same way as dst_rect */
+ s32 dw = new_dst_rect->width - old_dst_rect->width;
+ s32 dh = new_dst_rect->height - old_dst_rect->height;
+ b2r2_log_info(cont->dev,
+ "%s\nold bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ __func__, old_bg_rect->x, old_bg_rect->y,
+ old_bg_rect->width, old_bg_rect->height);
+ new_bg_rect->x = old_bg_rect->x + dx;
+ new_bg_rect->y = old_bg_rect->y + dy;
+ new_bg_rect->width = old_bg_rect->width + dw;
+ new_bg_rect->height = old_bg_rect->height + dh;
+ b2r2_log_info(cont->dev,
+ "%s\nnew bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ __func__, new_bg_rect->x, new_bg_rect->y,
+ new_bg_rect->width, new_bg_rect->height);
+ }
+ return;
+keep_rects:
+ /*
+ * Recalculation was not possible, or not necessary.
+ * Do not change anything, leave it to validation.
+ */
+ *new_src_rect = *old_src_rect;
+ *new_dst_rect = *old_dst_rect;
+ *new_bg_rect = *old_bg_rect;
+ b2r2_log_info(cont->dev, "%s original rectangles preserved.\n", __func__);
+ return;
+}
+
+int b2r2_get_fmt_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt)
+{
+ /*
+ * Currently this function is not used that often but if that changes a
+ * lookup table could make it a lot faster.
+ */
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ return 1;
+
+ case B2R2_BLT_FMT_8_BIT_A8:
+ return 8;
+
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return 12;
+
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return 16;
+
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return 24;
+
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 32;
+
+ default:
+ b2r2_log_err(cont->dev,
+ "%s: Internal error! Format %#x not recognized.\n",
+ __func__, fmt);
+ return 32;
+ }
+}
+
+int b2r2_get_fmt_y_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return 8;
+
+ default:
+ b2r2_log_err(cont->dev,
+ "%s: Internal error! Non YCbCr format supplied.\n",
+ __func__);
+ return 8;
+ }
+}
+
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_1_BIT_A1:
+ case B2R2_BLT_FMT_8_BIT_A8:
+ case B2R2_BLT_FMT_16_BIT_ARGB4444:
+ case B2R2_BLT_FMT_16_BIT_ARGB1555:
+ case B2R2_BLT_FMT_16_BIT_RGB565:
+ case B2R2_BLT_FMT_24_BIT_RGB888:
+ case B2R2_BLT_FMT_24_BIT_ARGB8565:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_ARGB8888:
+ case B2R2_BLT_FMT_32_BIT_ABGR8888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_Y_CB_Y_CR:
+ case B2R2_BLT_FMT_CB_Y_CR_Y:
+ case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
+ case B2R2_BLT_FMT_32_BIT_AYUV8888:
+ case B2R2_BLT_FMT_24_BIT_YUV888:
+ case B2R2_BLT_FMT_32_BIT_VUYA8888:
+ case B2R2_BLT_FMT_24_BIT_VUY888:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt)
+{
+ switch (fmt) {
+ case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
+ case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+u32 b2r2_calc_pitch_from_width(struct b2r2_control *cont,
+ s32 width, enum b2r2_blt_fmt fmt)
+{
+ if (b2r2_is_single_plane_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_bpp(cont, fmt), 8);
+ } else if (b2r2_is_ycbcrsp_fmt(fmt) || b2r2_is_ycbcrp_fmt(fmt)) {
+ return (u32)b2r2_div_round_up(width *
+ b2r2_get_fmt_y_bpp(cont, fmt), 8);
+ } else {
+ b2r2_log_err(cont->dev, "%s: Internal error! "
+ "Pitchless format supplied.\n",
+ __func__);
+ return 0;
+ }
+}
+
+u32 b2r2_get_img_pitch(struct b2r2_control *cont, struct b2r2_blt_img *img)
+{
+ if (img->pitch != 0)
+ return img->pitch;
+ else
+ return b2r2_calc_pitch_from_width(cont, img->width, img->fmt);
+}
+
+s32 b2r2_get_img_size(struct b2r2_control *cont, struct b2r2_blt_img *img)
+{
+ if (b2r2_is_single_plane_fmt(img->fmt)) {
+ return (s32)b2r2_get_img_pitch(cont, img) * img->height;
+ } else if (b2r2_is_ycbcrsp_fmt(img->fmt) ||
+ b2r2_is_ycbcrp_fmt(img->fmt)) {
+ s32 y_plane_size;
+
+ y_plane_size = (s32)b2r2_get_img_pitch(cont, img) * img->height;
+
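+ /*
+ * The chroma planes add half, one or two times the luma plane
+ * size for 420, 422 and 444 subsampling respectively.
+ */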
+ if (b2r2_is_ycbcr420_fmt(img->fmt)) {
+ return y_plane_size + y_plane_size / 2;
+ } else if (b2r2_is_ycbcr422_fmt(img->fmt)) {
+ return y_plane_size * 2;
+ } else if (b2r2_is_ycbcr444_fmt(img->fmt)) {
+ return y_plane_size * 3;
+ } else {
+ b2r2_log_err(cont->dev, "%s: Internal error!"
+ " Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+ } else if (b2r2_is_mb_fmt(img->fmt)) {
+ return (img->width * img->height *
+ b2r2_get_fmt_bpp(cont, img->fmt)) / 8;
+ } else {
+ b2r2_log_err(cont->dev, "%s: Internal error! "
+ "Format %#x not recognized.\n",
+ __func__, img->fmt);
+ return 0;
+ }
+}
+
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor)
+{
+ s32 quotient = dividend / divisor;
+ if (dividend % divisor != 0)
+ quotient++;
+
+ return quotient;
+}
+
+bool b2r2_is_aligned(s32 value, s32 alignment)
+{
+ return value % alignment == 0;
+}
+
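+/*
+ * Rounds value up towards positive infinity to the nearest multiple of
+ * alignment, e.g. b2r2_align_up(5, 4) == 8 and b2r2_align_up(-5, 4) == -4.
+ */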
+s32 b2r2_align_up(s32 value, s32 alignment)
+{
+ s32 remainder = abs(value) % abs(alignment);
+ s32 value_to_add;
+
+ if (remainder > 0) {
+ if (value >= 0)
+ value_to_add = alignment - remainder;
+ else
+ value_to_add = remainder;
+ } else {
+ value_to_add = 0;
+ }
+
+ return value + value_to_add;
+}
diff --git a/drivers/video/b2r2/b2r2_utils.h b/drivers/video/b2r2/b2r2_utils.h
new file mode 100644
index 00000000000..0516447b42f
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_utils.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 utils
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
+
+#include <video/b2r2_blt.h>
+
+#include "b2r2_internal.h"
+
+extern const s32 b2r2_s32_max;
+
+int calculate_scale_factor(struct b2r2_control *cont,
+ u32 from, u32 to, u16 *sf_out);
+void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *bounding_rect);
+
+bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect);
+bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2);
+bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2);
+void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
+ struct b2r2_blt_rect *rect2,
+ struct b2r2_blt_rect *intersection);
+void b2r2_trim_rects(struct b2r2_control *cont,
+ const struct b2r2_blt_req *req,
+ struct b2r2_blt_rect *new_bg_rect,
+ struct b2r2_blt_rect *new_dst_rect,
+ struct b2r2_blt_rect *new_src_rect);
+
+int b2r2_get_fmt_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt);
+int b2r2_get_fmt_y_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt);
+
+bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt);
+bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt);
+
+/*
+ * Rounds up if an invalid width causes the pitch to be non-byte-aligned.
+ */
+u32 b2r2_calc_pitch_from_width(struct b2r2_control *cont,
+ s32 width, enum b2r2_blt_fmt fmt);
+u32 b2r2_get_img_pitch(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+s32 b2r2_get_img_size(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+
+s32 b2r2_div_round_up(s32 dividend, s32 divisor);
+bool b2r2_is_aligned(s32 value, s32 alignment);
+s32 b2r2_align_up(s32 value, s32 alignment);
+
+#endif