summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--drivers/video/b2r2/b2r2_blt_main.c1427
-rw-r--r--drivers/video/b2r2/b2r2_core.c1407
-rw-r--r--drivers/video/b2r2/b2r2_core.h132
-rw-r--r--drivers/video/b2r2/b2r2_debug.c141
-rw-r--r--drivers/video/b2r2/b2r2_debug.h32
-rw-r--r--drivers/video/b2r2/b2r2_filters.c58
-rw-r--r--drivers/video/b2r2/b2r2_filters.h6
-rw-r--r--drivers/video/b2r2/b2r2_generic.c343
-rw-r--r--drivers/video/b2r2/b2r2_generic.h4
-rw-r--r--drivers/video/b2r2/b2r2_input_validation.c139
-rw-r--r--drivers/video/b2r2/b2r2_input_validation.h5
-rw-r--r--drivers/video/b2r2/b2r2_internal.h352
-rw-r--r--drivers/video/b2r2/b2r2_mem_alloc.c374
-rw-r--r--drivers/video/b2r2/b2r2_mem_alloc.h53
-rw-r--r--drivers/video/b2r2/b2r2_node_gen.c37
-rw-r--r--drivers/video/b2r2/b2r2_node_split.c655
-rw-r--r--drivers/video/b2r2/b2r2_node_split.h17
-rw-r--r--drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c152
-rw-r--r--drivers/video/b2r2/b2r2_profiler_socket.c7
-rw-r--r--drivers/video/b2r2/b2r2_utils.c104
-rw-r--r--drivers/video/b2r2/b2r2_utils.h21
21 files changed, 2895 insertions, 2571 deletions
diff --git a/drivers/video/b2r2/b2r2_blt_main.c b/drivers/video/b2r2/b2r2_blt_main.c
index 51087e4b437..f79bfaee9ab 100644
--- a/drivers/video/b2r2/b2r2_blt_main.c
+++ b/drivers/video/b2r2/b2r2_blt_main.c
@@ -44,6 +44,8 @@
#include "b2r2_debug.h"
#include "b2r2_utils.h"
#include "b2r2_input_validation.h"
+#include "b2r2_core.h"
+#include "b2r2_filters.h"
#define B2R2_HEAP_SIZE (4 * PAGE_SIZE)
#define MAX_TMP_BUF_SIZE (128 * PAGE_SIZE)
@@ -59,93 +61,22 @@
*/
/**
- * b2r2_blt_dev - Our device, /dev/b2r2_blt
+ * b2r2_blt_dev - Our device(s), /dev/b2r2_blt
*/
-static struct miscdevice *b2r2_blt_dev;
-
-static struct {
- struct b2r2_work_buf buf;
- bool in_use;
-} tmp_bufs[MAX_TMP_BUFS_NEEDED];
-
-/* Statistics */
-
-/**
- * stat_lock - Spin lock protecting the statistics
- */
-static struct mutex stat_lock;
-/**
- * stat_n_jobs_added - Number of jobs added to b2r2_core
- */
-static unsigned long stat_n_jobs_added;
-/**
- * stat_n_jobs_released - Number of jobs released (job_release called)
- */
-static unsigned long stat_n_jobs_released;
-/**
- * stat_n_jobs_in_report_list - Number of jobs currently in the report list
- */
-static unsigned long stat_n_jobs_in_report_list;
-/**
- * stat_n_in_blt - Number of client threads currently exec inside b2r2_blt()
- */
-static unsigned long stat_n_in_blt;
-/**
- * stat_n_in_blt_synch - Nunmber of client threads currently waiting for synch
- */
-static unsigned long stat_n_in_blt_synch;
-/**
- * stat_n_in_blt_add - Number of client threads currenlty adding in b2r2_blt
- */
-static unsigned long stat_n_in_blt_add;
-/**
- * stat_n_in_blt_wait - Number of client threads currently waiting in b2r2_blt
- */
-static unsigned long stat_n_in_blt_wait;
-/**
- * stat_n_in_sync_0 - Number of client threads currently in b2r2_blt_sync
- * waiting for all client jobs to finish
- */
-static unsigned long stat_n_in_synch_0;
-/**
- * stat_n_in_sync_job - Number of client threads currently in b2r2_blt_sync
- * waiting specific job to finish
- */
-static unsigned long stat_n_in_synch_job;
-/**
- * stat_n_in_query_cap - Number of clients currently in query cap
- */
-static unsigned long stat_n_in_query_cap;
-/**
- * stat_n_in_open - Number of clients currently in b2r2_blt_open
- */
-static unsigned long stat_n_in_open;
-/**
- * stat_n_in_release - Number of clients currently in b2r2_blt_release
- */
-static unsigned long stat_n_in_release;
+static struct b2r2_control *b2r2_ctl[B2R2_MAX_NBR_DEVICES];
/* Debug file system support */
#ifdef CONFIG_DEBUG_FS
-/**
- * debugfs_latest_request - Copy of the latest request issued
- */
-struct b2r2_blt_request debugfs_latest_request;
-/**
- * debugfs_root_dir - The debugfs root directory, i.e. /debugfs/b2r2
- */
-static struct dentry *debugfs_root_dir;
-
static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size);
#endif
/* Local functions */
-static void inc_stat(unsigned long *stat);
-static void dec_stat(unsigned long *stat);
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat);
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat);
static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
- int request_id);
+ int request_id);
static int b2r2_blt_query_cap(struct b2r2_blt_instance *instance,
- struct b2r2_blt_query_cap *query_cap);
+ struct b2r2_blt_query_cap *query_cap);
#ifndef CONFIG_B2R2_GENERIC_ONLY
static int b2r2_blt(struct b2r2_blt_instance *instance,
@@ -170,25 +101,24 @@ static void tile_job_release_gen(struct b2r2_core_job *job);
#endif
-static int resolve_buf(struct b2r2_blt_img *img,
- struct b2r2_blt_rect *rect_2b_used,
- bool is_dst,
- struct b2r2_resolved_buf *resolved);
-static void unresolve_buf(struct b2r2_blt_buf *buf,
- struct b2r2_resolved_buf *resolved);
-static void sync_buf(struct b2r2_blt_img *img,
- struct b2r2_resolved_buf *resolved,
- bool is_dst,
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect_2b_used,
+ bool is_dst, struct b2r2_resolved_buf *resolved);
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf, struct b2r2_resolved_buf *resolved);
+static void sync_buf(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_resolved_buf *resolved, bool is_dst,
struct b2r2_blt_rect *rect);
static bool is_report_list_empty(struct b2r2_blt_instance *instance);
static bool is_synching(struct b2r2_blt_instance *instance);
static void get_actual_dst_rect(struct b2r2_blt_req *req,
- struct b2r2_blt_rect *actual_dst_rect);
-static void set_up_hwmem_region(struct b2r2_blt_img *img,
- struct b2r2_blt_rect *rect, struct hwmem_region *region);
-static int resolve_hwmem(struct b2r2_blt_img *img,
- struct b2r2_blt_rect *rect_2b_used, bool is_dst,
- struct b2r2_resolved_buf *resolved_buf);
+ struct b2r2_blt_rect *actual_dst_rect);
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region);
+static int resolve_hwmem(struct b2r2_control *cont, struct b2r2_blt_img *img,
+ struct b2r2_blt_rect *rect_2b_used, bool is_dst,
+ struct b2r2_resolved_buf *resolved_buf);
static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf);
/**
@@ -202,7 +132,8 @@ struct sync_args {
unsigned long end;
};
/**
- * flush_l1_cache_range_curr_cpu() - Cleans and invalidates L1 cache on the current CPU
+ * flush_l1_cache_range_curr_cpu() - Cleans and invalidates L1 cache on the
+ * current CPU
*
* @arg: Pointer to sync_args structure
*/
@@ -269,16 +200,17 @@ static int b2r2_blt_open(struct inode *inode, struct file *filp)
{
int ret = 0;
struct b2r2_blt_instance *instance;
+ struct b2r2_control *cont = filp->private_data;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
- inc_stat(&stat_n_in_open);
+ inc_stat(cont, &cont->stat_n_in_open);
/* Allocate and initialize the instance */
instance = (struct b2r2_blt_instance *)
kmalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
- b2r2_log_err("%s: Failed to alloc\n", __func__);
+ b2r2_log_err(cont->dev, "%s: Failed to alloc\n", __func__);
goto instance_alloc_failed;
}
memset(instance, 0, sizeof(*instance));
@@ -286,6 +218,7 @@ static int b2r2_blt_open(struct inode *inode, struct file *filp)
mutex_init(&instance->lock);
init_waitqueue_head(&instance->report_list_waitq);
init_waitqueue_head(&instance->synch_done_waitq);
+ instance->control = cont;
/*
* Remember the instance so that we can retrieve it in
@@ -296,7 +229,7 @@ static int b2r2_blt_open(struct inode *inode, struct file *filp)
instance_alloc_failed:
out:
- dec_stat(&stat_n_in_open);
+ dec_stat(cont, &cont->stat_n_in_open);
return ret;
}
@@ -314,39 +247,40 @@ out:
static int b2r2_blt_release(struct inode *inode, struct file *filp)
{
int ret;
- struct b2r2_blt_instance *instance;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+ struct b2r2_control *cont = instance->control;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
- inc_stat(&stat_n_in_release);
-
- instance = (struct b2r2_blt_instance *) filp->private_data;
+ inc_stat(cont, &cont->stat_n_in_release);
/* Finish all outstanding requests */
ret = b2r2_blt_synch(instance, 0);
if (ret < 0)
- b2r2_log_warn(
- "%s: b2r2_blt_sync failed with %d\n", __func__, ret);
+ b2r2_log_warn(cont->dev, "%s: b2r2_blt_sync failed with %d\n",
+ __func__, ret);
/* Now cancel any remaining outstanding request */
if (instance->no_of_active_requests) {
struct b2r2_core_job *job;
- b2r2_log_warn("%s: %d active requests\n",
- __func__, instance->no_of_active_requests);
+ b2r2_log_warn(cont->dev, "%s: %d active requests\n", __func__,
+ instance->no_of_active_requests);
/* Find and cancel all jobs belonging to us */
- job = b2r2_core_job_find_first_with_tag((int) instance);
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
while (job) {
b2r2_core_job_cancel(job);
/* Matches addref in b2r2_core_job_find... */
b2r2_core_job_release(job, __func__);
- job = b2r2_core_job_find_first_with_tag((int) instance);
+ job = b2r2_core_job_find_first_with_tag(cont,
+ (int) instance);
}
- b2r2_log_warn(
- "%s: %d active requests after cancel\n",
- __func__, instance->no_of_active_requests);
+ b2r2_log_warn(cont->dev, "%s: %d active requests after "
+ "cancel\n", __func__, instance->no_of_active_requests);
}
/* Release jobs in report list */
@@ -370,7 +304,7 @@ static int b2r2_blt_release(struct inode *inode, struct file *filp)
/* Release our instance */
kfree(instance);
- dec_stat(&stat_n_in_release);
+ dec_stat(cont, &cont->stat_n_in_release);
return 0;
}
@@ -388,15 +322,14 @@ static long b2r2_blt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int ret = 0;
- struct b2r2_blt_instance *instance;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) file->private_data;
+ struct b2r2_control *cont = instance->control;
/** Process actual ioctl */
-
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/* Get the instance from the file structure */
- instance = (struct b2r2_blt_instance *) file->private_data;
-
switch (cmd) {
case B2R2_BLT_IOC: {
/* This is the "blit" command */
@@ -405,7 +338,7 @@ static long b2r2_blt_ioctl(struct file *file,
struct b2r2_blt_request *request =
kmalloc(sizeof(*request), GFP_KERNEL);
if (!request) {
- b2r2_log_err("%s: Failed to alloc mem\n",
+ b2r2_log_err(cont->dev, "%s: Failed to alloc mem\n",
__func__);
return -ENOMEM;
}
@@ -423,14 +356,13 @@ static long b2r2_blt_ioctl(struct file *file,
/* Get the user data */
if (copy_from_user(&request->user_req, (void *)arg,
sizeof(request->user_req))) {
- b2r2_log_err(
- "%s: copy_from_user failed\n",
+ b2r2_log_err(cont->dev, "%s: copy_from_user failed\n",
__func__);
kfree(request);
return -EFAULT;
}
- if (!b2r2_validate_user_req(&request->user_req)) {
+ if (!b2r2_validate_user_req(cont, &request->user_req)) {
kfree(request);
return -EINVAL;
}
@@ -443,22 +375,24 @@ static long b2r2_blt_ioctl(struct file *file,
*/
if ((request->user_req.flags &
B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
- request->clut = dma_alloc_coherent(b2r2_blt_device(),
+ request->clut = dma_alloc_coherent(cont->dev,
CLUT_SIZE, &(request->clut_phys_addr),
GFP_DMA | GFP_KERNEL);
if (request->clut == NULL) {
- b2r2_log_err("%s CLUT allocation failed.\n",
- __func__);
+ b2r2_log_err(cont->dev, "%s CLUT allocation "
+ "failed.\n", __func__);
kfree(request);
return -ENOMEM;
}
if (copy_from_user(request->clut,
request->user_req.clut, CLUT_SIZE)) {
- b2r2_log_err("%s: CLUT copy_from_user failed\n",
+ b2r2_log_err(cont->dev, "%s: CLUT "
+ "copy_from_user failed\n",
__func__);
- dma_free_coherent(b2r2_blt_device(), CLUT_SIZE,
- request->clut, request->clut_phys_addr);
+ dma_free_coherent(cont->dev, CLUT_SIZE,
+ request->clut,
+ request->clut_phys_addr);
request->clut = NULL;
request->clut_phys_addr = 0;
kfree(request);
@@ -482,16 +416,20 @@ static long b2r2_blt_ioctl(struct file *file,
struct b2r2_blt_request *request_gen;
if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
- /* No support for BG BLEND in generic implementation yet */
- b2r2_log_warn("%s: Unsupported: Background blend in b2r2_generic_blt \n",
+ /* No support for BG BLEND in generic
+ * implementation yet */
+ b2r2_log_warn(cont->dev, "%s: Unsupported: "
+ "Background blend in b2r2_generic_blt\n",
__func__);
return ret;
}
- b2r2_log_info("b2r2_blt=%d Going generic.\n", ret);
+ b2r2_log_info(cont->dev,
+ "b2r2_blt=%d Going generic.\n", ret);
request_gen = kmalloc(sizeof(*request_gen), GFP_KERNEL);
if (!request_gen) {
- b2r2_log_err("%s: Failed to alloc mem for "
+ b2r2_log_err(cont->dev,
+ "%s: Failed to alloc mem for "
"request_gen\n", __func__);
return -ENOMEM;
}
@@ -508,12 +446,11 @@ static long b2r2_blt_ioctl(struct file *file,
/* Get the user data */
if (copy_from_user(&request_gen->user_req, (void *)arg,
- sizeof(request_gen->user_req))) {
- b2r2_log_err(
- "%s: copy_from_user failed\n",
- __func__);
- kfree(request_gen);
- return -EFAULT;
+ sizeof(request_gen->user_req))) {
+ b2r2_log_err(cont->dev, "%s: copy_from_user "
+ "failed\n", __func__);
+ kfree(request_gen);
+ return -EFAULT;
}
/*
@@ -523,14 +460,14 @@ static long b2r2_blt_ioctl(struct file *file,
if ((request_gen->user_req.flags &
B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION)
!= 0) {
- request_gen->clut =
- dma_alloc_coherent(b2r2_blt_device(),
- CLUT_SIZE,
+ request_gen->clut = dma_alloc_coherent(
+ cont->dev, CLUT_SIZE,
&(request_gen->clut_phys_addr),
GFP_DMA | GFP_KERNEL);
if (request_gen->clut == NULL) {
- b2r2_log_err("%s CLUT allocation "
- "failed.\n", __func__);
+ b2r2_log_err(cont->dev, "%s CLUT "
+ "allocation failed.\n",
+ __func__);
kfree(request_gen);
return -ENOMEM;
}
@@ -538,10 +475,11 @@ static long b2r2_blt_ioctl(struct file *file,
if (copy_from_user(request_gen->clut,
request_gen->user_req.clut,
CLUT_SIZE)) {
- b2r2_log_err("%s: CLUT copy_from_user "
- "failed\n", __func__);
- dma_free_coherent(b2r2_blt_device(),
- CLUT_SIZE, request_gen->clut,
+ b2r2_log_err(cont->dev, "%s: CLUT"
+ " copy_from_user failed\n",
+ __func__);
+ dma_free_coherent(cont->dev, CLUT_SIZE,
+ request_gen->clut,
request_gen->clut_phys_addr);
request_gen->clut = NULL;
request_gen->clut_phys_addr = 0;
@@ -553,8 +491,8 @@ static long b2r2_blt_ioctl(struct file *file,
request_gen->profile = is_profiler_registered_approx();
ret = b2r2_generic_blt(instance, request_gen);
- b2r2_log_info("\nb2r2_generic_blt=%d Generic done.\n",
- ret);
+ b2r2_log_info(cont->dev, "\nb2r2_generic_blt=%d "
+ "Generic done.\n", ret);
}
#endif /* CONFIG_B2R2_GENERIC_FALLBACK */
@@ -562,24 +500,19 @@ static long b2r2_blt_ioctl(struct file *file,
}
case B2R2_BLT_SYNCH_IOC:
- /* This is the "synch" command */
-
/* arg is request_id */
ret = b2r2_blt_synch(instance, (int) arg);
break;
case B2R2_BLT_QUERY_CAP_IOC:
{
- /* This is the "query capabilities" command */
-
/* Arg is struct b2r2_blt_query_cap */
struct b2r2_blt_query_cap query_cap;
/* Get the user data */
if (copy_from_user(&query_cap, (void *)arg,
sizeof(query_cap))) {
- b2r2_log_err(
- "%s: copy_from_user failed\n",
+ b2r2_log_err(cont->dev, "%s: copy_from_user failed\n",
__func__);
return -EFAULT;
}
@@ -590,7 +523,7 @@ static long b2r2_blt_ioctl(struct file *file,
/* Return data to user */
if (copy_to_user((void *)arg, &query_cap,
sizeof(query_cap))) {
- b2r2_log_err("%s: copy_to_user failed\n",
+ b2r2_log_err(cont->dev, "%s: copy_to_user failed\n",
__func__);
return -EFAULT;
}
@@ -599,15 +532,14 @@ static long b2r2_blt_ioctl(struct file *file,
default:
/* Unknown command */
- b2r2_log_err(
- "%s: Unknown cmd %d\n", __func__, cmd);
+ b2r2_log_err(cont->dev, "%s: Unknown cmd %d\n", __func__, cmd);
ret = -EINVAL;
break;
}
if (ret < 0)
- b2r2_log_err("EC %d OK!\n", -ret);
+ b2r2_log_err(cont->dev, "EC %d OK!\n", -ret);
return ret;
}
@@ -623,13 +555,14 @@ static long b2r2_blt_ioctl(struct file *file,
*/
static unsigned b2r2_blt_poll(struct file *filp, poll_table *wait)
{
- struct b2r2_blt_instance *instance;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
unsigned int mask = 0;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
- b2r2_log_info("%s\n", __func__);
-
- /* Get the instance from the file structure */
- instance = (struct b2r2_blt_instance *) filp->private_data;
+ b2r2_log_info(cont->dev, "%s\n", __func__);
poll_wait(filp, &instance->report_list_waitq, wait);
mutex_lock(&instance->lock);
@@ -654,14 +587,15 @@ static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
int ret = 0;
- struct b2r2_blt_instance *instance;
- struct b2r2_blt_request *request;
+ struct b2r2_blt_request *request = NULL;
struct b2r2_blt_report report;
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) filp->private_data;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = instance->control;
+#endif
- b2r2_log_info("%s\n", __func__);
-
- /* Get the instance from the file structure */
- instance = (struct b2r2_blt_instance *) filp->private_data;
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/*
* We return only complete report records, one at a time.
@@ -686,10 +620,10 @@ static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- b2r2_log_info("%s - Going to sleep\n", __func__);
+ b2r2_log_info(cont->dev, "%s - Going to sleep\n", __func__);
if (wait_event_interruptible(
- instance->report_list_waitq,
- !is_report_list_empty(instance)))
+ instance->report_list_waitq,
+ !is_report_list_empty(instance)))
/* signal: tell the fs layer to handle it */
return -ERESTARTSYS;
@@ -697,10 +631,6 @@ static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
mutex_lock(&instance->lock);
}
- /* Ok, we have something to return */
-
- /* Return */
- request = NULL;
if (!list_empty(&instance->report_list))
request = list_first_entry(
&instance->report_list, struct b2r2_blt_request, list);
@@ -715,16 +645,14 @@ static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
report.usec_elapsed = 0; /* TBD */
mutex_unlock(&instance->lock);
- if (copy_to_user(buf,
- &report,
- sizeof(report)))
+ if (copy_to_user(buf, &report, sizeof(report)))
ret = -EFAULT;
mutex_lock(&instance->lock);
- if (ret) {
+ if (ret < 0) {
/* copy to user failed, re-insert into list */
list_add(&request->list,
- &request->instance->report_list);
+ &request->instance->report_list);
request = NULL;
}
}
@@ -752,16 +680,6 @@ static const struct file_operations b2r2_blt_fops = {
.read = b2r2_blt_read,
};
-/**
- * b2r2_blt_misc_dev - Misc device config for b2r2_blt
- */
-static struct miscdevice b2r2_blt_misc_dev = {
- MISC_DYNAMIC_MINOR,
- "b2r2_blt",
- &b2r2_blt_fops
-};
-
-
#ifndef CONFIG_B2R2_GENERIC_ONLY
/**
* b2r2_blt - Implementation of the B2R2 blit request
@@ -777,6 +695,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
int request_id = 0;
struct b2r2_node *last_node = request->first_node;
int node_count;
+ struct b2r2_control *cont = instance->control;
u32 thread_runtime_at_start = 0;
@@ -785,12 +704,12 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
thread_runtime_at_start = (u32)task_sched_runtime(current);
}
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
- inc_stat(&stat_n_in_blt);
+ inc_stat(cont, &cont->stat_n_in_blt);
/* Debug prints of incoming request */
- b2r2_log_info(
+ b2r2_log_info(cont->dev,
"src.fmt=%#010x src.buf={%d,%d,%d} "
"src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
request->user_req.src_img.fmt,
@@ -804,23 +723,22 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->user_req.src_rect.width,
request->user_req.src_rect.height);
- if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
- b2r2_log_info(
- "bg.fmt=%#010x bg.buf={%d,%d,%d} "
- "bg.w,h={%d,%d} bg.rect={%d,%d,%d,%d}\n",
- request->user_req.bg_img.fmt,
- request->user_req.bg_img.buf.type,
- request->user_req.bg_img.buf.fd,
- request->user_req.bg_img.buf.offset,
- request->user_req.bg_img.width,
- request->user_req.bg_img.height,
- request->user_req.bg_rect.x,
- request->user_req.bg_rect.y,
- request->user_req.bg_rect.width,
- request->user_req.bg_rect.height);
- }
-
- b2r2_log_info(
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev,
+ "bg.fmt=%#010x bg.buf={%d,%d,%d} "
+ "bg.w,h={%d,%d} bg.rect={%d,%d,%d,%d}\n",
+ request->user_req.bg_img.fmt,
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ b2r2_log_info(cont->dev,
"dst.fmt=%#010x dst.buf={%d,%d,%d} "
"dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n",
request->user_req.dst_img.fmt,
@@ -834,74 +752,71 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->user_req.dst_rect.width,
request->user_req.dst_rect.height);
- inc_stat(&stat_n_in_blt_synch);
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
/* Wait here if synch is ongoing */
ret = wait_event_interruptible(instance->synch_done_waitq,
- !is_synching(instance));
+ !is_synching(instance));
if (ret) {
- b2r2_log_warn(
- "%s: Sync wait interrupted, %d\n",
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
__func__, ret);
ret = -EAGAIN;
- dec_stat(&stat_n_in_blt_synch);
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
goto synch_interrupted;
}
- dec_stat(&stat_n_in_blt_synch);
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
/* Resolve the buffers */
/* Source buffer */
- ret = resolve_buf(&request->user_req.src_img,
- &request->user_req.src_rect, false, &request->src_resolved);
+ ret = resolve_buf(cont, &request->user_req.src_img,
+ &request->user_req.src_rect,
+ false, &request->src_resolved);
if (ret < 0) {
- b2r2_log_warn(
- "%s: Resolve src buf failed, %d\n",
- __func__, ret);
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
+ __func__, ret);
ret = -EAGAIN;
goto resolve_src_buf_failed;
}
/* Background buffer */
if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
- ret = resolve_buf(&request->user_req.bg_img,
- &request->user_req.bg_rect, false, &request->bg_resolved);
+ ret = resolve_buf(cont, &request->user_req.bg_img,
+ &request->user_req.bg_rect,
+ false, &request->bg_resolved);
if (ret < 0) {
- b2r2_log_warn(
- "%s: Resolve bg buf failed, %d\n",
- __func__, ret);
+ b2r2_log_warn(cont->dev, "%s: Resolve bg buf failed,"
+ " %d\n", __func__, ret);
ret = -EAGAIN;
goto resolve_bg_buf_failed;
}
}
/* Source mask buffer */
- ret = resolve_buf(&request->user_req.src_mask,
+ ret = resolve_buf(cont, &request->user_req.src_mask,
&request->user_req.src_rect, false,
&request->src_mask_resolved);
if (ret < 0) {
- b2r2_log_warn(
- "%s: Resolve src mask buf failed, %d\n",
- __func__, ret);
+ b2r2_log_warn(cont->dev, "%s: Resolve src mask buf failed,"
+ " %d\n", __func__, ret);
ret = -EAGAIN;
goto resolve_src_mask_buf_failed;
}
/* Destination buffer */
get_actual_dst_rect(&request->user_req, &actual_dst_rect);
- ret = resolve_buf(&request->user_req.dst_img, &actual_dst_rect,
- true, &request->dst_resolved);
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
+ true, &request->dst_resolved);
if (ret < 0) {
- b2r2_log_warn(
- "%s: Resolve dst buf failed, %d\n",
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
goto resolve_dst_buf_failed;
}
/* Debug prints of resolved buffers */
- b2r2_log_info("src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->src_resolved.physical_address,
request->src_resolved.virtual_address,
request->src_resolved.is_pmem,
@@ -910,8 +825,8 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->src_resolved.file_virtual_start,
request->src_resolved.file_len);
- if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
- b2r2_log_info("bg.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ b2r2_log_info(cont->dev, "bg.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->bg_resolved.physical_address,
request->bg_resolved.virtual_address,
request->bg_resolved.is_pmem,
@@ -919,9 +834,8 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->bg_resolved.file_physical_start,
request->bg_resolved.file_virtual_start,
request->bg_resolved.file_len);
- }
- b2r2_log_info("dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->dst_resolved.physical_address,
request->dst_resolved.virtual_address,
request->dst_resolved.is_pmem,
@@ -931,33 +845,33 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->dst_resolved.file_len);
/* Calculate the number of nodes (and resources) needed for this job */
- ret = b2r2_node_split_analyze(request, MAX_TMP_BUF_SIZE,
- &node_count, &request->bufs, &request->buf_count,
- &request->node_split_job);
+ ret = b2r2_node_split_analyze(request, MAX_TMP_BUF_SIZE, &node_count,
+ &request->bufs, &request->buf_count,
+ &request->node_split_job);
if (ret == -ENOSYS) {
/* There was no optimized path for this request */
- b2r2_log_info(
- "%s: No optimized path for request\n", __func__);
+ b2r2_log_info(cont->dev, "%s: No optimized path for request\n",
+ __func__);
goto no_optimized_path;
} else if (ret < 0) {
- b2r2_log_warn(
- "%s: Failed to analyze request, ret = %d\n",
- __func__, ret);
+ b2r2_log_warn(cont->dev, "%s: Failed to analyze request,"
+ " ret = %d\n", __func__, ret);
#ifdef CONFIG_DEBUG_FS
{
/* Failed, dump job to dmesg */
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- b2r2_log_info(
- "%s: Analyze failed for:\n", __func__);
+ b2r2_log_info(cont->dev, "%s: Analyze failed for:\n",
+ __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- b2r2_log_info("%s", Buf);
+ b2r2_log_info(cont->dev, "%s", Buf);
kfree(Buf);
} else {
- b2r2_log_info("Unable to print the request. "
- "Message buffer allocation failed.\n");
+ b2r2_log_info(cont->dev, "Unable to print the"
+ " request. Message buffer"
+ " allocation failed.\n");
}
}
#endif
@@ -966,17 +880,17 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
/* Allocate the nodes needed */
#ifdef B2R2_USE_NODE_GEN
- request->first_node = b2r2_blt_alloc_nodes(node_count);
+ request->first_node = b2r2_blt_alloc_nodes(cont,
+ node_count);
if (request->first_node == NULL) {
- b2r2_log_warn(
- "%s: Failed to allocate nodes, ret = %d\n",
- __func__, ret);
+ b2r2_log_warn(cont->dev, "%s: Failed to allocate nodes,"
+ " ret = %d\n", __func__, ret);
goto generate_nodes_failed;
}
#else
- ret = b2r2_node_alloc(node_count, &(request->first_node));
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
if (ret < 0 || request->first_node == NULL) {
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Failed to allocate nodes, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
@@ -984,12 +898,12 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
#endif
/* Build the B2R2 node list */
- ret = b2r2_node_split_configure(&request->node_split_job,
+ ret = b2r2_node_split_configure(cont, &request->node_split_job,
request->first_node);
if (ret < 0) {
- b2r2_log_warn(
- "%s: Failed to perform node split, ret = %d\n",
+ b2r2_log_warn(cont->dev, "%s:"
+ " Failed to perform node split, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
}
@@ -1022,24 +936,21 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
(request->user_req.src_img.buf.type !=
B2R2_BLT_PTR_PHYSICAL) &&
!b2r2_is_mb_fmt(request->user_req.src_img.fmt))
- /* MB formats are never touched by SW */
- sync_buf(&request->user_req.src_img,
- &request->src_resolved,
- false, /*is_dst*/
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
+ &request->src_resolved, false,
&request->user_req.src_rect);
/* Background buffer */
- if ((request->user_req.flags &
- B2R2_BLT_FLAG_BG_BLEND) &&
+ if ((request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) &&
!(request->user_req.flags &
B2R2_BLT_FLAG_BG_NO_CACHE_FLUSH) &&
(request->user_req.bg_img.buf.type !=
B2R2_BLT_PTR_PHYSICAL) &&
!b2r2_is_mb_fmt(request->user_req.bg_img.fmt))
- /* MB formats are never touched by SW */
- sync_buf(&request->user_req.bg_img,
- &request->bg_resolved,
- false, /*is_dst*/
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.bg_img,
+ &request->bg_resolved, false,
&request->user_req.bg_rect);
/* Source mask buffer */
@@ -1048,11 +959,9 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
(request->user_req.src_mask.buf.type !=
B2R2_BLT_PTR_PHYSICAL) &&
!b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
- /* MB formats are never touched by SW */
- sync_buf(&request->user_req.src_mask,
- &request->src_mask_resolved,
- false, /*is_dst*/
- NULL);
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
+ &request->src_mask_resolved, false, NULL);
/* Destination buffer */
if (!(request->user_req.flags &
@@ -1060,21 +969,20 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
(request->user_req.dst_img.buf.type !=
B2R2_BLT_PTR_PHYSICAL) &&
!b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
- /* MB formats are never touched by SW */
- sync_buf(&request->user_req.dst_img,
- &request->dst_resolved,
- true, /*is_dst*/
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
+ &request->dst_resolved, true,
&request->user_req.dst_rect);
#ifdef CONFIG_DEBUG_FS
/* Remember latest request for debugfs */
- debugfs_latest_request = *request;
+ cont->debugfs_latest_request = *request;
#endif
/* Submit the job */
- b2r2_log_info("%s: Submitting job\n", __func__);
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
- inc_stat(&stat_n_in_blt_add);
+ inc_stat(cont, &cont->stat_n_in_blt_add);
if (request->profile)
request->nsec_active_in_cpu =
@@ -1084,42 +992,41 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
mutex_lock(&instance->lock);
/* Add the job to b2r2_core */
- request_id = b2r2_core_job_add(&request->job);
+ request_id = b2r2_core_job_add(cont, &request->job);
request->request_id = request_id;
- dec_stat(&stat_n_in_blt_add);
+ dec_stat(cont, &cont->stat_n_in_blt_add);
if (request_id < 0) {
- b2r2_log_warn("%s: Failed to add job, ret = %d\n",
+ b2r2_log_warn(cont->dev, "%s: Failed to add job, ret = %d\n",
__func__, request_id);
ret = request_id;
mutex_unlock(&instance->lock);
goto job_add_failed;
}
- inc_stat(&stat_n_jobs_added);
+ inc_stat(cont, &cont->stat_n_jobs_added);
instance->no_of_active_requests++;
mutex_unlock(&instance->lock);
/* Wait for the job to be done if synchronous */
if ((request->user_req.flags & B2R2_BLT_FLAG_ASYNCH) == 0) {
- b2r2_log_info("%s: Synchronous, waiting\n",
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
__func__);
- inc_stat(&stat_n_in_blt_wait);
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
ret = b2r2_core_job_wait(&request->job);
- dec_stat(&stat_n_in_blt_wait);
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
if (ret < 0 && ret != -ENOENT)
- b2r2_log_warn(
- "%s: Failed to wait job, ret = %d\n",
- __func__, ret);
+ b2r2_log_warn(cont->dev, "%s: Failed to wait job,"
+ " ret = %d\n", __func__, ret);
else
- b2r2_log_info(
- "%s: Synchronous wait done\n", __func__);
+ b2r2_log_info(cont->dev, "%s: Synchronous wait done\n",
+ __func__);
ret = 0;
}
@@ -1128,8 +1035,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
* the request must not be accessed after this call
*/
b2r2_core_job_release(&request->job, __func__);
-
- dec_stat(&stat_n_in_blt);
+ dec_stat(cont, &cont->stat_n_in_blt);
return ret >= 0 ? request_id : ret;
@@ -1137,28 +1043,27 @@ job_add_failed:
exit_dry_run:
no_optimized_path:
generate_nodes_failed:
- unresolve_buf(&request->user_req.dst_img.buf,
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
&request->dst_resolved);
resolve_dst_buf_failed:
- unresolve_buf(&request->user_req.src_mask.buf,
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
&request->src_mask_resolved);
resolve_src_mask_buf_failed:
- if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
- unresolve_buf(&request->user_req.bg_img.buf,
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
&request->bg_resolved);
- }
resolve_bg_buf_failed:
- unresolve_buf(&request->user_req.src_img.buf,
+ unresolve_buf(cont, &request->user_req.src_img.buf,
&request->src_resolved);
resolve_src_buf_failed:
synch_interrupted:
job_release(&request->job);
- dec_stat(&stat_n_jobs_released);
+ dec_stat(cont, &cont->stat_n_jobs_released);
if ((request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) == 0 || ret)
- b2r2_log_warn(
- "%s returns with error %d\n", __func__, ret);
+ b2r2_log_warn(cont->dev, "%s returns with error %d\n",
+ __func__, ret);
- dec_stat(&stat_n_in_blt);
+ dec_stat(cont, &cont->stat_n_in_blt);
return ret;
}
@@ -1172,24 +1077,24 @@ static void job_callback(struct b2r2_core_job *job)
{
struct b2r2_blt_request *request =
container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
- if (b2r2_blt_device())
- b2r2_log_info("%s\n", __func__);
+ if (cont->dev)
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/* Local addref / release within this func */
b2r2_core_job_addref(job, __func__);
/* Unresolve the buffers */
- unresolve_buf(&request->user_req.src_img.buf,
+ unresolve_buf(cont, &request->user_req.src_img.buf,
&request->src_resolved);
- unresolve_buf(&request->user_req.src_mask.buf,
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
&request->src_mask_resolved);
- unresolve_buf(&request->user_req.dst_img.buf,
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
&request->dst_resolved);
- if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) {
- unresolve_buf(&request->user_req.bg_img.buf,
+ if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND)
+ unresolve_buf(cont, &request->user_req.bg_img.buf,
&request->bg_resolved);
- }
/* Move to report list if the job shall be reported */
/* FIXME: Use a smaller struct? */
@@ -1198,7 +1103,7 @@ static void job_callback(struct b2r2_core_job *job)
/* Move job to report list */
list_add_tail(&request->list,
&request->instance->report_list);
- inc_stat(&stat_n_jobs_in_report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
/* Wake up poll */
wake_up_interruptible(
@@ -1215,7 +1120,7 @@ static void job_callback(struct b2r2_core_job *job)
BUG_ON(request->instance->no_of_active_requests == 0);
request->instance->no_of_active_requests--;
if (request->instance->synching &&
- request->instance->no_of_active_requests == 0) {
+ request->instance->no_of_active_requests == 0) {
request->instance->synching = false;
/* Wake up all syncing */
@@ -1229,14 +1134,14 @@ static void job_callback(struct b2r2_core_job *job)
if (job->job_state == B2R2_CORE_JOB_CANCELED) {
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- b2r2_log_info("%s: Job cancelled:\n", __func__);
+ b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- b2r2_log_info("%s", Buf);
+ b2r2_log_info(cont->dev, "%s", Buf);
kfree(Buf);
} else {
- b2r2_log_info("Unable to print the request. "
- "Message buffer allocation failed.\n");
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
}
}
#endif
@@ -1260,26 +1165,27 @@ static void job_release(struct b2r2_core_job *job)
{
struct b2r2_blt_request *request =
container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
- inc_stat(&stat_n_jobs_released);
+ inc_stat(cont, &cont->stat_n_jobs_released);
- b2r2_log_info("%s, first_node=%p, ref_count=%d\n",
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
__func__, request->first_node, request->job.ref_count);
- b2r2_node_split_cancel(&request->node_split_job);
+ b2r2_node_split_cancel(cont, &request->node_split_job);
if (request->first_node) {
- b2r2_debug_job_done(request->first_node);
+ b2r2_debug_job_done(cont, request->first_node);
#ifdef B2R2_USE_NODE_GEN
- b2r2_blt_free_nodes(request->first_node);
+ b2r2_blt_free_nodes(cont, request->first_node);
#else
- b2r2_node_free(request->first_node);
+ b2r2_node_free(cont, request->first_node);
#endif
}
/* Release memory for the request */
if (request->clut != NULL) {
- dma_free_coherent(b2r2_blt_device(), CLUT_SIZE, request->clut,
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
request->clut_phys_addr);
request->clut = NULL;
request->clut_phys_addr = 0;
@@ -1300,17 +1206,19 @@ static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
{
struct b2r2_blt_request *request =
container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
int ret;
int i;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
if (request->buf_count == 0)
return 0;
if (request->buf_count > MAX_TMP_BUFS_NEEDED) {
- b2r2_log_err("%s: request->buf_count > MAX_TMP_BUFS_NEEDED\n",
- __func__);
+ b2r2_log_err(cont->dev,
+ "%s: request->buf_count > MAX_TMP_BUFS_NEEDED\n",
+ __func__);
return -ENOMSG;
}
@@ -1322,29 +1230,30 @@ static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
* usage but we avoid get into a situation where lower prio jobs can
* delay higher prio jobs that require more temp buffers.
*/
- if (tmp_bufs[0].in_use)
+ if (cont->tmp_bufs[0].in_use)
return -EAGAIN;
for (i = 0; i < request->buf_count; i++) {
- if (tmp_bufs[i].buf.size < request->bufs[i].size) {
- b2r2_log_err("%s: tmp_bufs[i].buf.size < "
- "request->bufs[i].size\n",
- __func__);
+ if (cont->tmp_bufs[i].buf.size < request->bufs[i].size) {
+ b2r2_log_err(cont->dev, "%s: "
+ "cont->tmp_bufs[i].buf.size < "
+ "request->bufs[i].size\n", __func__);
ret = -ENOMSG;
goto error;
}
- tmp_bufs[i].in_use = true;
- request->bufs[i].phys_addr = tmp_bufs[i].buf.phys_addr;
- request->bufs[i].virt_addr = tmp_bufs[i].buf.virt_addr;
+ cont->tmp_bufs[i].in_use = true;
+ request->bufs[i].phys_addr = cont->tmp_bufs[i].buf.phys_addr;
+ request->bufs[i].virt_addr = cont->tmp_bufs[i].buf.virt_addr;
- b2r2_log_info("%s: phys=%p, virt=%p\n",
- __func__, (void *)request->bufs[i].phys_addr,
- request->bufs[i].virt_addr);
+ b2r2_log_info(cont->dev, "%s: phys=%p, virt=%p\n",
+ __func__, (void *)request->bufs[i].phys_addr,
+ request->bufs[i].virt_addr);
- ret = b2r2_node_split_assign_buffers(&request->node_split_job,
- request->first_node, request->bufs,
- request->buf_count);
+ ret = b2r2_node_split_assign_buffers(cont,
+ &request->node_split_job,
+ request->first_node, request->bufs,
+ request->buf_count);
if (ret < 0)
goto error;
}
@@ -1353,7 +1262,7 @@ static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
error:
for (i = 0; i < request->buf_count; i++)
- tmp_bufs[i].in_use = false;
+ cont->tmp_bufs[i].in_use = false;
return ret;
}
@@ -1371,16 +1280,17 @@ static void job_release_resources(struct b2r2_core_job *job, bool atomic)
{
struct b2r2_blt_request *request =
container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
int i;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/* Free any temporary buffers */
for (i = 0; i < request->buf_count; i++) {
- b2r2_log_info("%s: freeing %d bytes\n",
- __func__, request->bufs[i].size);
- tmp_bufs[i].in_use = false;
+ b2r2_log_info(cont->dev, "%s: freeing %d bytes\n",
+ __func__, request->bufs[i].size);
+ cont->tmp_bufs[i].in_use = false;
memset(&request->bufs[i], 0, sizeof(request->bufs[i]));
}
request->buf_count = 0;
@@ -1390,12 +1300,12 @@ static void job_release_resources(struct b2r2_core_job *job, bool atomic)
* FIXME: If nodes are to be reused we don't want to release here
*/
if (!atomic && request->first_node) {
- b2r2_debug_job_done(request->first_node);
+ b2r2_debug_job_done(cont, request->first_node);
#ifdef B2R2_USE_NODE_GEN
- b2r2_blt_free_nodes(request->first_node);
+ b2r2_blt_free_nodes(cont, request->first_node);
#else
- b2r2_node_free(request->first_node);
+ b2r2_node_free(cont, request->first_node);
#endif
request->first_node = NULL;
}
@@ -1412,8 +1322,13 @@ static void job_release_resources(struct b2r2_core_job *job, bool atomic)
*/
static void tile_job_callback_gen(struct b2r2_core_job *job)
{
- if (b2r2_blt_device())
- b2r2_log_info("%s\n", __func__);
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_control *cont = instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/* Local addref / release within this func */
b2r2_core_job_addref(job, __func__);
@@ -1421,7 +1336,8 @@ static void tile_job_callback_gen(struct b2r2_core_job *job)
#ifdef CONFIG_DEBUG_FS
/* Notify if a tile job is cancelled */
if (job->job_state == B2R2_CORE_JOB_CANCELED)
- b2r2_log_info("%s: Tile job cancelled:\n", __func__);
+ b2r2_log_info(cont->dev, "%s: Tile job cancelled:\n",
+ __func__);
#endif
/* Local addref / release within this func */
@@ -1439,19 +1355,19 @@ static void job_callback_gen(struct b2r2_core_job *job)
{
struct b2r2_blt_request *request =
container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
- if (b2r2_blt_device())
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/* Local addref / release within this func */
b2r2_core_job_addref(job, __func__);
/* Unresolve the buffers */
- unresolve_buf(&request->user_req.src_img.buf,
+ unresolve_buf(cont, &request->user_req.src_img.buf,
&request->src_resolved);
- unresolve_buf(&request->user_req.src_mask.buf,
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
&request->src_mask_resolved);
- unresolve_buf(&request->user_req.dst_img.buf,
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
&request->dst_resolved);
/* Move to report list if the job shall be reported */
@@ -1462,7 +1378,7 @@ static void job_callback_gen(struct b2r2_core_job *job)
/* Move job to report list */
list_add_tail(&request->list,
&request->instance->report_list);
- inc_stat(&stat_n_jobs_in_report_list);
+ inc_stat(cont, &cont->stat_n_jobs_in_report_list);
/* Wake up poll */
wake_up_interruptible(
@@ -1496,14 +1412,14 @@ static void job_callback_gen(struct b2r2_core_job *job)
if (job->job_state == B2R2_CORE_JOB_CANCELED) {
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- b2r2_log_info("%s: Job cancelled:\n", __func__);
+ b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- b2r2_log_info("%s", Buf);
+ b2r2_log_info(cont->dev, "%s", Buf);
kfree(Buf);
} else {
- b2r2_log_info("Unable to print the request. "
- "Message buffer allocation failed.\n");
+ b2r2_log_info(cont->dev, "Unable to print the request."
+ " Message buffer allocation failed.\n");
}
}
#endif
@@ -1522,10 +1438,15 @@ static void job_callback_gen(struct b2r2_core_job *job)
static void tile_job_release_gen(struct b2r2_core_job *job)
{
- inc_stat(&stat_n_jobs_released);
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_control *cont = instance->control;
- b2r2_log_info("%s, first_node_address=0x%.8x, ref_count=%d\n",
- __func__, job->first_node_address, job->ref_count);
+ inc_stat(cont, &cont->stat_n_jobs_released);
+
+ b2r2_log_info(cont->dev, "%s, first_node_address=0x%.8x, ref_count="
+ "%d\n", __func__, job->first_node_address,
+ job->ref_count);
/* Release memory for the job */
kfree(job);
@@ -1541,26 +1462,27 @@ static void job_release_gen(struct b2r2_core_job *job)
{
struct b2r2_blt_request *request =
container_of(job, struct b2r2_blt_request, job);
+ struct b2r2_control *cont = request->instance->control;
- inc_stat(&stat_n_jobs_released);
+ inc_stat(cont, &cont->stat_n_jobs_released);
- b2r2_log_info("%s, first_node=%p, ref_count=%d\n",
- __func__, request->first_node, request->job.ref_count);
+ b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n",
+ __func__, request->first_node, request->job.ref_count);
if (request->first_node) {
- b2r2_debug_job_done(request->first_node);
+ b2r2_debug_job_done(cont, request->first_node);
/* Free nodes */
#ifdef B2R2_USE_NODE_GEN
- b2r2_blt_free_nodes(request->first_node);
+ b2r2_blt_free_nodes(cont, request->first_node);
#else
- b2r2_node_free(request->first_node);
+ b2r2_node_free(cont, request->first_node);
#endif
}
/* Release memory for the request */
if (request->clut != NULL) {
- dma_free_coherent(b2r2_blt_device(), CLUT_SIZE, request->clut,
+ dma_free_coherent(cont->dev, CLUT_SIZE, request->clut,
request->clut_phys_addr);
request->clut = NULL;
request->clut_phys_addr = 0;
@@ -1605,6 +1527,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
struct b2r2_work_buf work_bufs[4];
struct b2r2_blt_rect dst_rect_tile;
int i;
+ struct b2r2_control *cont = instance->control;
u32 thread_runtime_at_start = 0;
s32 nsec_active_in_b2r2 = 0;
@@ -1635,12 +1558,12 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
memset(work_bufs, 0, sizeof(work_bufs));
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
- inc_stat(&stat_n_in_blt);
+ inc_stat(cont, &cont->stat_n_in_blt);
/* Debug prints of incoming request */
- b2r2_log_info(
+ b2r2_log_info(cont->dev,
"src.fmt=%#010x flags=0x%.8x src.buf={%d,%d,0x%.8x}\n"
"src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
request->user_req.src_img.fmt,
@@ -1654,7 +1577,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->user_req.src_rect.y,
request->user_req.src_rect.width,
request->user_req.src_rect.height);
- b2r2_log_info(
+ b2r2_log_info(cont->dev,
"dst.fmt=%#010x dst.buf={%d,%d,0x%.8x}\n"
"dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n"
"dst_clip_rect={%d,%d,%d,%d}\n",
@@ -1673,41 +1596,39 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->user_req.dst_clip_rect.width,
request->user_req.dst_clip_rect.height);
- inc_stat(&stat_n_in_blt_synch);
+ inc_stat(cont, &cont->stat_n_in_blt_synch);
/* Wait here if synch is ongoing */
ret = wait_event_interruptible(instance->synch_done_waitq,
!is_synching(instance));
if (ret) {
- b2r2_log_warn(
- "%s: Sync wait interrupted, %d\n",
+ b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n",
__func__, ret);
ret = -EAGAIN;
- dec_stat(&stat_n_in_blt_synch);
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
goto synch_interrupted;
}
- dec_stat(&stat_n_in_blt_synch);
+ dec_stat(cont, &cont->stat_n_in_blt_synch);
/* Resolve the buffers */
/* Source buffer */
- ret = resolve_buf(&request->user_req.src_img,
+ ret = resolve_buf(cont, &request->user_req.src_img,
&request->user_req.src_rect, false, &request->src_resolved);
if (ret < 0) {
- b2r2_log_warn(
- "%s: Resolve src buf failed, %d\n",
+ b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
goto resolve_src_buf_failed;
}
/* Source mask buffer */
- ret = resolve_buf(&request->user_req.src_mask,
+ ret = resolve_buf(cont, &request->user_req.src_mask,
&request->user_req.src_rect, false,
&request->src_mask_resolved);
if (ret < 0) {
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Resolve src mask buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -1716,18 +1637,17 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/* Destination buffer */
get_actual_dst_rect(&request->user_req, &actual_dst_rect);
- ret = resolve_buf(&request->user_req.dst_img, &actual_dst_rect,
+ ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect,
true, &request->dst_resolved);
if (ret < 0) {
- b2r2_log_warn(
- "%s: Resolve dst buf failed, %d\n",
+ b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
goto resolve_dst_buf_failed;
}
/* Debug prints of resolved buffers */
- b2r2_log_info("src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->src_resolved.physical_address,
request->src_resolved.virtual_address,
request->src_resolved.is_pmem,
@@ -1736,7 +1656,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->src_resolved.file_virtual_start,
request->src_resolved.file_len);
- b2r2_log_info("dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->dst_resolved.physical_address,
request->dst_resolved.virtual_address,
request->dst_resolved.is_pmem,
@@ -1749,7 +1669,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
ret = b2r2_generic_analyze(request, &tmp_buf_width,
&tmp_buf_height, &tmp_buf_count, &node_count);
if (ret < 0) {
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Failed to analyze request, ret = %d\n",
__func__, ret);
#ifdef CONFIG_DEBUG_FS
@@ -1757,14 +1677,15 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/* Failed, dump job to dmesg */
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- b2r2_log_info(
+ b2r2_log_info(cont->dev,
"%s: Analyze failed for:\n", __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- b2r2_log_info("%s", Buf);
+ b2r2_log_info(cont->dev, "%s", Buf);
kfree(Buf);
} else {
- b2r2_log_info("Unable to print the request. "
+ b2r2_log_info(cont->dev,
+ "Unable to print the request. "
"Message buffer allocation failed.\n");
}
}
@@ -1774,17 +1695,17 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/* Allocate the nodes needed */
#ifdef B2R2_USE_NODE_GEN
- request->first_node = b2r2_blt_alloc_nodes(node_count);
+ request->first_node = b2r2_blt_alloc_nodes(cont, node_count);
if (request->first_node == NULL) {
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Failed to allocate nodes, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
}
#else
- ret = b2r2_node_alloc(node_count, &(request->first_node));
+ ret = b2r2_node_alloc(cont, node_count, &(request->first_node));
if (ret < 0 || request->first_node == NULL) {
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Failed to allocate nodes, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
@@ -1796,7 +1717,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
void *virt;
work_bufs[i].size = tmp_buf_width * tmp_buf_height * 4;
- virt = dma_alloc_coherent(b2r2_blt_device(),
+ virt = dma_alloc_coherent(cont->dev,
work_bufs[i].size,
&(work_bufs[i].phys_addr),
GFP_DMA | GFP_KERNEL);
@@ -1812,7 +1733,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->first_node, &work_bufs[0], tmp_buf_count);
if (ret < 0) {
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Failed to perform generic configure, ret = %d\n",
__func__, ret);
goto generic_conf_failed;
@@ -1851,8 +1772,8 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
(request->user_req.src_img.buf.type !=
B2R2_BLT_PTR_PHYSICAL) &&
!b2r2_is_mb_fmt(request->user_req.src_img.fmt))
- /* MB formats are never touched by SW */
- sync_buf(&request->user_req.src_img,
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_img,
&request->src_resolved,
false, /*is_dst*/
&request->user_req.src_rect);
@@ -1862,8 +1783,8 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
(request->user_req.src_mask.buf.type !=
B2R2_BLT_PTR_PHYSICAL) &&
!b2r2_is_mb_fmt(request->user_req.src_mask.fmt))
- /* MB formats are never touched by SW */
- sync_buf(&request->user_req.src_mask,
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.src_mask,
&request->src_mask_resolved,
false, /*is_dst*/
NULL);
@@ -1873,15 +1794,15 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
(request->user_req.dst_img.buf.type !=
B2R2_BLT_PTR_PHYSICAL) &&
!b2r2_is_mb_fmt(request->user_req.dst_img.fmt))
- /* MB formats are never touched by SW */
- sync_buf(&request->user_req.dst_img,
+ /* MB formats are never touched by SW */
+ sync_buf(cont, &request->user_req.dst_img,
&request->dst_resolved,
true, /*is_dst*/
&request->user_req.dst_rect);
#ifdef CONFIG_DEBUG_FS
/* Remember latest request */
- debugfs_latest_request = *request;
+ cont->debugfs_latest_request = *request;
#endif
/*
@@ -1919,8 +1840,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
if (dst_rect->x < 0)
x = -dst_rect->x;
- for (; x < dst_rect->width &&
- x + dst_rect->x < dst_img_width;
+ for (; x < dst_rect->width && x + dst_rect->x < dst_img_width;
x += tmp_buf_width) {
/*
* Tile jobs are freed by the supplied release function
@@ -1935,9 +1855,9 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
* with rest of the tiles.
* Memory might become available.
*/
- b2r2_log_info("%s: Failed to alloc job. "
- "Skipping tile at (x, y)=(%d, %d)\n",
- __func__, x, y);
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
continue;
}
tile_job->tag = request->job.tag;
@@ -1949,8 +1869,10 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
tile_job->callback = tile_job_callback_gen;
tile_job->release = tile_job_release_gen;
/* Work buffers and nodes are pre-allocated */
- tile_job->acquire_resources = job_acquire_resources_gen;
- tile_job->release_resources = job_release_resources_gen;
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
dst_rect_tile.x = x;
if (x + dst_rect->x + tmp_buf_width > dst_img_width) {
@@ -1978,18 +1900,19 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
b2r2_generic_set_areas(request,
request->first_node, &dst_rect_tile);
/* Submit the job */
- b2r2_log_info("%s: Submitting job\n", __func__);
+ b2r2_log_info(cont->dev,
+ "%s: Submitting job\n", __func__);
- inc_stat(&stat_n_in_blt_add);
+ inc_stat(cont, &cont->stat_n_in_blt_add);
mutex_lock(&instance->lock);
- request_id = b2r2_core_job_add(tile_job);
+ request_id = b2r2_core_job_add(cont, tile_job);
- dec_stat(&stat_n_in_blt_add);
+ dec_stat(cont, &cont->stat_n_in_blt_add);
if (request_id < 0) {
- b2r2_log_warn("%s: "
+ b2r2_log_warn(cont->dev, "%s: "
"Failed to add tile job, ret = %d\n",
__func__, request_id);
ret = request_id;
@@ -1997,26 +1920,26 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
goto job_add_failed;
}
- inc_stat(&stat_n_jobs_added);
+ inc_stat(cont, &cont->stat_n_jobs_added);
mutex_unlock(&instance->lock);
/* Wait for the job to be done */
- b2r2_log_info("%s: Synchronous, waiting\n",
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
__func__);
- inc_stat(&stat_n_in_blt_wait);
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
ret = b2r2_core_job_wait(tile_job);
- dec_stat(&stat_n_in_blt_wait);
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
if (ret < 0 && ret != -ENOENT)
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Failed to wait job, ret = %d\n",
__func__, ret);
else {
- b2r2_log_info(
+ b2r2_log_info(cont->dev,
"%s: Synchronous wait done\n",
__func__);
@@ -2048,9 +1971,9 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
*/
tile_job = kmalloc(sizeof(*tile_job), GFP_KERNEL);
if (tile_job == NULL) {
- b2r2_log_info("%s: Failed to alloc job. "
- "Skipping tile at (x, y)=(%d, %d)\n",
- __func__, x, y);
+ b2r2_log_info(cont->dev, "%s: Failed to alloc "
+ "job. Skipping tile at (x, y)="
+ "(%d, %d)\n", __func__, x, y);
continue;
}
tile_job->tag = request->job.tag;
@@ -2061,8 +1984,10 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->job.last_node_address;
tile_job->callback = tile_job_callback_gen;
tile_job->release = tile_job_release_gen;
- tile_job->acquire_resources = job_acquire_resources_gen;
- tile_job->release_resources = job_release_resources_gen;
+ tile_job->acquire_resources =
+ job_acquire_resources_gen;
+ tile_job->release_resources =
+ job_release_resources_gen;
}
dst_rect_tile.x = x;
@@ -2100,27 +2025,27 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
b2r2_generic_set_areas(request,
request->first_node, &dst_rect_tile);
- b2r2_log_info("%s: Submitting job\n", __func__);
- inc_stat(&stat_n_in_blt_add);
+ b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__);
+ inc_stat(cont, &cont->stat_n_in_blt_add);
mutex_lock(&instance->lock);
if (x + tmp_buf_width < dst_rect->width &&
x + dst_rect->x + tmp_buf_width <
dst_img_width) {
- request_id = b2r2_core_job_add(tile_job);
+ request_id = b2r2_core_job_add(cont, tile_job);
} else {
/*
* Last tile. Send the job-struct from the request.
* Clients will be notified once it completes.
*/
- request_id = b2r2_core_job_add(&request->job);
+ request_id = b2r2_core_job_add(cont, &request->job);
}
- dec_stat(&stat_n_in_blt_add);
+ dec_stat(cont, &cont->stat_n_in_blt_add);
if (request_id < 0) {
- b2r2_log_warn("%s: Failed to add tile job, ret = %d\n",
- __func__, request_id);
+ b2r2_log_warn(cont->dev, "%s: Failed to add tile job, "
+ "ret = %d\n", __func__, request_id);
ret = request_id;
mutex_unlock(&instance->lock);
if (tile_job != NULL)
@@ -2128,13 +2053,13 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
goto job_add_failed;
}
- inc_stat(&stat_n_jobs_added);
+ inc_stat(cont, &cont->stat_n_jobs_added);
mutex_unlock(&instance->lock);
- b2r2_log_info("%s: Synchronous, waiting\n",
+ b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n",
__func__);
- inc_stat(&stat_n_in_blt_wait);
+ inc_stat(cont, &cont->stat_n_in_blt_wait);
if (x + tmp_buf_width < dst_rect->width &&
x + dst_rect->x + tmp_buf_width <
dst_img_width) {
@@ -2146,14 +2071,14 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
*/
ret = b2r2_core_job_wait(&request->job);
}
- dec_stat(&stat_n_in_blt_wait);
+ dec_stat(cont, &cont->stat_n_in_blt_wait);
if (ret < 0 && ret != -ENOENT)
- b2r2_log_warn(
+ b2r2_log_warn(cont->dev,
"%s: Failed to wait job, ret = %d\n",
__func__, ret);
else {
- b2r2_log_info(
+ b2r2_log_info(cont->dev,
"%s: Synchronous wait done\n", __func__);
if (x + tmp_buf_width < dst_rect->width &&
@@ -2198,10 +2123,10 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
}
}
- dec_stat(&stat_n_in_blt);
+ dec_stat(cont, &cont->stat_n_in_blt);
for (i = 0; i < tmp_buf_count; i++) {
- dma_free_coherent(b2r2_blt_device(),
+ dma_free_coherent(cont->dev,
work_bufs[i].size,
work_bufs[i].virt_addr,
work_bufs[i].phys_addr);
@@ -2216,7 +2141,7 @@ generic_conf_failed:
alloc_work_bufs_failed:
for (i = 0; i < 4; i++) {
if (work_bufs[i].virt_addr != 0) {
- dma_free_coherent(b2r2_blt_device(),
+ dma_free_coherent(cont->dev,
work_bufs[i].size,
work_bufs[i].virt_addr,
work_bufs[i].phys_addr);
@@ -2225,22 +2150,22 @@ alloc_work_bufs_failed:
}
generate_nodes_failed:
- unresolve_buf(&request->user_req.dst_img.buf,
+ unresolve_buf(cont, &request->user_req.dst_img.buf,
&request->dst_resolved);
resolve_dst_buf_failed:
- unresolve_buf(&request->user_req.src_mask.buf,
+ unresolve_buf(cont, &request->user_req.src_mask.buf,
&request->src_mask_resolved);
resolve_src_mask_buf_failed:
- unresolve_buf(&request->user_req.src_img.buf,
+ unresolve_buf(cont, &request->user_req.src_img.buf,
&request->src_resolved);
resolve_src_buf_failed:
synch_interrupted:
zero_blt:
job_release_gen(&request->job);
- dec_stat(&stat_n_jobs_released);
- dec_stat(&stat_n_in_blt);
+ dec_stat(cont, &cont->stat_n_jobs_released);
+ dec_stat(cont, &cont->stat_n_in_blt);
- b2r2_log_info("b2r2:%s ret=%d", __func__, ret);
+ b2r2_log_info(cont->dev, "b2r2:%s ret=%d", __func__, ret);
return ret;
}
#endif /* CONFIG_B2R2_GENERIC */
@@ -2256,11 +2181,13 @@ static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
int request_id)
{
int ret = 0;
- b2r2_log_info("%s, request_id=%d\n", __func__, request_id);
+ struct b2r2_control *cont = instance->control;
+
+ b2r2_log_info(cont->dev, "%s, request_id=%d\n", __func__, request_id);
if (request_id == 0) {
/* Wait for all requests */
- inc_stat(&stat_n_in_synch_0);
+ inc_stat(cont, &cont->stat_n_in_synch_0);
/* Enter state "synching" if we have any active request */
mutex_lock(&instance->lock);
@@ -2270,15 +2197,15 @@ static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
/* Wait until no longer in state synching */
ret = wait_event_interruptible(instance->synch_done_waitq,
- !is_synching(instance));
- dec_stat(&stat_n_in_synch_0);
+ !is_synching(instance));
+ dec_stat(cont, &cont->stat_n_in_synch_0);
} else {
struct b2r2_core_job *job;
- inc_stat(&stat_n_in_synch_job);
+ inc_stat(cont, &cont->stat_n_in_synch_job);
/* Wait for specific job */
- job = b2r2_core_job_find(request_id);
+ job = b2r2_core_job_find(cont, request_id);
if (job) {
/* Wait on find job */
ret = b2r2_core_job_wait(job);
@@ -2287,11 +2214,10 @@ static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
}
/* If job not found we assume that is has been run */
-
- dec_stat(&stat_n_in_synch_job);
+ dec_stat(cont, &cont->stat_n_in_synch_job);
}
- b2r2_log_info(
+ b2r2_log_info(cont->dev,
"%s, request_id=%d, returns %d\n", __func__, request_id, ret);
return ret;
@@ -2321,11 +2247,12 @@ static void get_actual_dst_rect(struct b2r2_blt_req *req,
if (req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP)
b2r2_intersect_rects(actual_dst_rect, &req->dst_clip_rect,
- actual_dst_rect);
+ actual_dst_rect);
}
-static void set_up_hwmem_region(struct b2r2_blt_img *img,
- struct b2r2_blt_rect *rect, struct hwmem_region *region)
+static void set_up_hwmem_region(struct b2r2_control *cont,
+ struct b2r2_blt_img *img, struct b2r2_blt_rect *rect,
+ struct hwmem_region *region)
{
s32 img_size;
@@ -2334,15 +2261,15 @@ static void set_up_hwmem_region(struct b2r2_blt_img *img,
if (b2r2_is_zero_area_rect(rect))
return;
- img_size = b2r2_get_img_size(img);
+ img_size = b2r2_get_img_size(cont, img);
if (b2r2_is_single_plane_fmt(img->fmt) &&
- b2r2_is_independent_pixel_fmt(img->fmt)) {
- int img_fmt_bpp = b2r2_get_fmt_bpp(img->fmt);
- u32 img_pitch = b2r2_get_img_pitch(img);
+ b2r2_is_independent_pixel_fmt(img->fmt)) {
+ int img_fmt_bpp = b2r2_get_fmt_bpp(cont, img->fmt);
+ u32 img_pitch = b2r2_get_img_pitch(cont, img);
region->offset = (u32)(img->buf.offset + (rect->y *
- img_pitch));
+ img_pitch));
region->count = (u32)rect->height;
region->start = (u32)((rect->x * img_fmt_bpp) / 8);
region->end = (u32)b2r2_div_round_up(
@@ -2363,7 +2290,8 @@ static void set_up_hwmem_region(struct b2r2_blt_img *img,
}
}
-static int resolve_hwmem(struct b2r2_blt_img *img,
+static int resolve_hwmem(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
struct b2r2_blt_rect *rect_2b_used,
bool is_dst,
struct b2r2_resolved_buf *resolved_buf)
@@ -2380,8 +2308,8 @@ static int resolve_hwmem(struct b2r2_blt_img *img,
hwmem_resolve_by_name(img->buf.hwmem_buf_name);
if (IS_ERR(resolved_buf->hwmem_alloc)) {
return_value = PTR_ERR(resolved_buf->hwmem_alloc);
- b2r2_log_info("%s: hwmem_resolve_by_name failed, "
- "error code: %i\n", __func__, return_value);
+ b2r2_log_info(cont->dev, "%s: hwmem_resolve_by_name failed, "
+ "error code: %i\n", __func__, return_value);
goto resolve_failed;
}
@@ -2389,25 +2317,27 @@ static int resolve_hwmem(struct b2r2_blt_img *img,
&mem_type, &access);
required_access = (is_dst ? HWMEM_ACCESS_WRITE : HWMEM_ACCESS_READ) |
- HWMEM_ACCESS_IMPORT;
+ HWMEM_ACCESS_IMPORT;
if ((required_access & access) != required_access) {
- b2r2_log_info("%s: Insufficient access to hwmem buffer.\n",
- __func__);
+ b2r2_log_info(cont->dev, "%s: Insufficient access to hwmem "
+ "buffer.\n", __func__);
return_value = -EACCES;
goto access_check_failed;
}
if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) {
- b2r2_log_info("%s: Hwmem buffer is scattered.\n", __func__);
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer is scattered.\n",
+ __func__);
return_value = -EINVAL;
goto buf_scattered;
}
if (resolved_buf->file_len <
- img->buf.offset + (__u32)b2r2_get_img_size(img)) {
- b2r2_log_info("%s: Hwmem buffer too small. (%d < %d) \n", __func__,
- resolved_buf->file_len,
- img->buf.offset + (__u32)b2r2_get_img_size(img));
+ img->buf.offset + (__u32)b2r2_get_img_size(cont, img)) {
+ b2r2_log_info(cont->dev, "%s: Hwmem buffer too small. (%d < "
+ "%d)\n", __func__, resolved_buf->file_len,
+ img->buf.offset +
+ (__u32)b2r2_get_img_size(cont, img));
return_value = -EINVAL;
goto size_check_failed;
}
@@ -2415,18 +2345,18 @@ static int resolve_hwmem(struct b2r2_blt_img *img,
return_value = hwmem_pin(resolved_buf->hwmem_alloc, &mem_chunk,
&mem_chunk_length);
if (return_value < 0) {
- b2r2_log_info("%s: hwmem_pin failed, "
- "error code: %i\n", __func__, return_value);
+ b2r2_log_info(cont->dev, "%s: hwmem_pin failed, "
+ "error code: %i\n", __func__, return_value);
goto pin_failed;
}
resolved_buf->file_physical_start = mem_chunk.paddr;
- set_up_hwmem_region(img, rect_2b_used, &region);
+ set_up_hwmem_region(cont, img, rect_2b_used, &region);
return_value = hwmem_set_domain(resolved_buf->hwmem_alloc,
- required_access, HWMEM_DOMAIN_SYNC, &region);
+ required_access, HWMEM_DOMAIN_SYNC, &region);
if (return_value < 0) {
- b2r2_log_info("%s: hwmem_set_domain failed, "
- "error code: %i\n", __func__, return_value);
+ b2r2_log_info(cont->dev, "%s: hwmem_set_domain failed, "
+ "error code: %i\n", __func__, return_value);
goto set_domain_failed;
}
@@ -2462,8 +2392,9 @@ static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf)
*
* Returns 0 if OK else negative error code
*/
-static void unresolve_buf(struct b2r2_blt_buf *buf,
- struct b2r2_resolved_buf *resolved)
+static void unresolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_buf *buf,
+ struct b2r2_resolved_buf *resolved)
{
#ifdef CONFIG_ANDROID_PMEM
if (resolved->is_pmem && resolved->filep)
@@ -2525,7 +2456,8 @@ static int get_fb_info(struct file *file,
*
* Returns 0 if OK else negative error code
*/
-static int resolve_buf(struct b2r2_blt_img *img,
+static int resolve_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
struct b2r2_blt_rect *rect_2b_used,
bool is_dst,
struct b2r2_resolved_buf *resolved)
@@ -2584,19 +2516,18 @@ static int resolve_buf(struct b2r2_blt_img *img,
if (img->buf.offset + img->buf.len >
resolved->file_len) {
ret = -ESPIPE;
- unresolve_buf(&img->buf, resolved);
+ unresolve_buf(cont, &img->buf, resolved);
}
break;
}
case B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET:
- ret = resolve_hwmem(img, rect_2b_used, is_dst, resolved);
+ ret = resolve_hwmem(cont, img, rect_2b_used, is_dst, resolved);
break;
default:
- b2r2_log_warn(
- "%s: Failed to resolve buf type %d\n",
+ b2r2_log_warn(cont->dev, "%s: Failed to resolve buf type %d\n",
__func__, img->buf.type);
ret = -EINVAL;
@@ -2619,7 +2550,8 @@ static int resolve_buf(struct b2r2_blt_img *img,
* @img_width: width of the complete image buffer
* @fmt: buffer format
*/
-static void sync_buf(struct b2r2_blt_img *img,
+static void sync_buf(struct b2r2_control *cont,
+ struct b2r2_blt_img *img,
struct b2r2_resolved_buf *resolved,
bool is_dst,
struct b2r2_blt_rect *rect)
@@ -2810,23 +2742,15 @@ static bool is_synching(struct b2r2_blt_instance *instance)
}
/**
- * b2r2_blt_devide() - Returns the B2R2 blt device for logging
- */
-struct device *b2r2_blt_device(void)
-{
- return b2r2_blt_dev ? b2r2_blt_dev->this_device : NULL;
-}
-
-/**
* inc_stat() - Spin lock protected increment of statistics variable
*
* @stat: Pointer to statistics variable that should be incremented
*/
-static void inc_stat(unsigned long *stat)
+static void inc_stat(struct b2r2_control *cont, unsigned long *stat)
{
- mutex_lock(&stat_lock);
+ mutex_lock(&cont->stat_lock);
(*stat)++;
- mutex_unlock(&stat_lock);
+ mutex_unlock(&cont->stat_lock);
}
/**
@@ -2834,11 +2758,11 @@ static void inc_stat(unsigned long *stat)
*
* @stat: Pointer to statistics variable that should be decremented
*/
-static void dec_stat(unsigned long *stat)
+static void dec_stat(struct b2r2_control *cont, unsigned long *stat)
{
- mutex_lock(&stat_lock);
+ mutex_lock(&cont->stat_lock);
(*stat)--;
- mutex_unlock(&stat_lock);
+ mutex_unlock(&cont->stat_lock);
}
@@ -2856,172 +2780,196 @@ static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
{
size_t dev_size = 0;
+ /* generic request info */
dev_size += sprintf(buf + dev_size,
- "instance: %p\n\n",
- request->instance);
-
+ "instance : 0x%08lX\n",
+ (unsigned long) request->instance);
+ dev_size += sprintf(buf + dev_size,
+ "size : %d bytes\n", request->user_req.size);
+ dev_size += sprintf(buf + dev_size,
+ "flags : 0x%08lX\n",
+ (unsigned long) request->user_req.flags);
+ dev_size += sprintf(buf + dev_size,
+ "transform : %d\n",
+ (int) request->user_req.transform);
+ dev_size += sprintf(buf + dev_size,
+ "prio : %d\n", request->user_req.transform);
dev_size += sprintf(buf + dev_size,
- "size: %d bytes\n",
- request->user_req.size);
+ "global_alpha : %d\n",
+ (int) request->user_req.global_alpha);
dev_size += sprintf(buf + dev_size,
- "flags: %8lX\n",
- (unsigned long) request->user_req.flags);
+ "report1 : 0x%08lX\n",
+ (unsigned long) request->user_req.report1);
dev_size += sprintf(buf + dev_size,
- "transform: %3lX\n",
- (unsigned long) request->user_req.transform);
+ "report2 : 0x%08lX\n",
+ (unsigned long) request->user_req.report2);
dev_size += sprintf(buf + dev_size,
- "prio: %d\n",
- request->user_req.transform);
+ "request_id : 0x%08lX\n\n",
+ (unsigned long) request->request_id);
+
+ /* src info */
dev_size += sprintf(buf + dev_size,
- "src_img.fmt: %#010x\n",
- request->user_req.src_img.fmt);
+ "src_img.fmt : %#010x\n",
+ request->user_req.src_img.fmt);
dev_size += sprintf(buf + dev_size,
- "src_img.buf: {type=%d,hwmem_buf_name=%d,fd=%d,"
- "offset=%d,len=%d}\n",
- request->user_req.src_img.buf.type,
- request->user_req.src_img.buf.hwmem_buf_name,
- request->user_req.src_img.buf.fd,
- request->user_req.src_img.buf.offset,
- request->user_req.src_img.buf.len);
+ "src_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d, "
+ "offset=%d, len=%d}\n",
+ request->user_req.src_img.buf.type,
+ request->user_req.src_img.buf.hwmem_buf_name,
+ request->user_req.src_img.buf.fd,
+ request->user_req.src_img.buf.offset,
+ request->user_req.src_img.buf.len);
dev_size += sprintf(buf + dev_size,
- "src_img.{width=%d,height=%d,pitch=%d}\n",
- request->user_req.src_img.width,
- request->user_req.src_img.height,
- request->user_req.src_img.pitch);
+ "src_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_img.width,
+ request->user_req.src_img.height,
+ request->user_req.src_img.pitch);
dev_size += sprintf(buf + dev_size,
- "src_mask.fmt: %#010x\n",
- request->user_req.src_mask.fmt);
+ "src_mask.fmt : %#010x\n",
+ request->user_req.src_mask.fmt);
dev_size += sprintf(buf + dev_size,
- "src_mask.buf: {type=%d,hwmem_buf_name=%d,fd=%d,"
- "offset=%d,len=%d}\n",
- request->user_req.src_mask.buf.type,
- request->user_req.src_mask.buf.hwmem_buf_name,
- request->user_req.src_mask.buf.fd,
- request->user_req.src_mask.buf.offset,
- request->user_req.src_mask.buf.len);
+ "src_mask.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.src_mask.buf.type,
+ request->user_req.src_mask.buf.hwmem_buf_name,
+ request->user_req.src_mask.buf.fd,
+ request->user_req.src_mask.buf.offset,
+ request->user_req.src_mask.buf.len);
dev_size += sprintf(buf + dev_size,
- "src_mask.{width=%d,height=%d,pitch=%d}\n",
- request->user_req.src_mask.width,
- request->user_req.src_mask.height,
- request->user_req.src_mask.pitch);
+ "src_mask : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.src_mask.width,
+ request->user_req.src_mask.height,
+ request->user_req.src_mask.pitch);
dev_size += sprintf(buf + dev_size,
- "src_rect.{x=%d,y=%d,width=%d,height=%d}\n",
- request->user_req.src_rect.x,
- request->user_req.src_rect.y,
- request->user_req.src_rect.width,
- request->user_req.src_rect.height);
+ "src_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.src_rect.x,
+ request->user_req.src_rect.y,
+ request->user_req.src_rect.width,
+ request->user_req.src_rect.height);
dev_size += sprintf(buf + dev_size,
- "src_color=%08lX\n",
- (unsigned long) request->user_req.src_color);
+ "src_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.src_color);
+ /* bg info */
dev_size += sprintf(buf + dev_size,
- "dst_img.fmt: %#010x\n",
- request->user_req.dst_img.fmt);
+ "bg_img.fmt : %#010x\n",
+ request->user_req.bg_img.fmt);
dev_size += sprintf(buf + dev_size,
- "dst_img.buf: {type=%d,hwmem_buf_name=%d,fd=%d,"
- "offset=%d,len=%d}\n",
- request->user_req.dst_img.buf.type,
- request->user_req.dst_img.buf.hwmem_buf_name,
- request->user_req.dst_img.buf.fd,
- request->user_req.dst_img.buf.offset,
- request->user_req.dst_img.buf.len);
+ "bg_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.bg_img.buf.type,
+ request->user_req.bg_img.buf.hwmem_buf_name,
+ request->user_req.bg_img.buf.fd,
+ request->user_req.bg_img.buf.offset,
+ request->user_req.bg_img.buf.len);
dev_size += sprintf(buf + dev_size,
- "dst_img.{width=%d,height=%d,pitch=%d}\n",
- request->user_req.dst_img.width,
- request->user_req.dst_img.height,
- request->user_req.dst_img.pitch);
+ "bg_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.bg_img.width,
+ request->user_req.bg_img.height,
+ request->user_req.bg_img.pitch);
dev_size += sprintf(buf + dev_size,
- "dst_rect.{x=%d,y=%d,width=%d,height=%d}\n",
- request->user_req.dst_rect.x,
- request->user_req.dst_rect.y,
- request->user_req.dst_rect.width,
- request->user_req.dst_rect.height);
+ "bg_rect : {x=%d, y=%d, width=%d, height=%d}\n\n",
+ request->user_req.bg_rect.x,
+ request->user_req.bg_rect.y,
+ request->user_req.bg_rect.width,
+ request->user_req.bg_rect.height);
+
+ /* dst info */
dev_size += sprintf(buf + dev_size,
- "dst_clip_rect.{x=%d,y=%d,width=%d,height=%d}\n",
- request->user_req.dst_clip_rect.x,
- request->user_req.dst_clip_rect.y,
- request->user_req.dst_clip_rect.width,
- request->user_req.dst_clip_rect.height);
+ "dst_img.fmt : %#010x\n",
+ request->user_req.dst_img.fmt);
dev_size += sprintf(buf + dev_size,
- "dst_color=%08lX\n",
- (unsigned long) request->user_req.dst_color);
+ "dst_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d,"
+ " offset=%d, len=%d}\n",
+ request->user_req.dst_img.buf.type,
+ request->user_req.dst_img.buf.hwmem_buf_name,
+ request->user_req.dst_img.buf.fd,
+ request->user_req.dst_img.buf.offset,
+ request->user_req.dst_img.buf.len);
dev_size += sprintf(buf + dev_size,
- "global_alpha=%d\n",
- (int) request->user_req.global_alpha);
+ "dst_img : {width=%d, height=%d, pitch=%d}\n",
+ request->user_req.dst_img.width,
+ request->user_req.dst_img.height,
+ request->user_req.dst_img.pitch);
dev_size += sprintf(buf + dev_size,
- "report1=%08lX\n",
- (unsigned long) request->user_req.report1);
+ "dst_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_rect.x,
+ request->user_req.dst_rect.y,
+ request->user_req.dst_rect.width,
+ request->user_req.dst_rect.height);
dev_size += sprintf(buf + dev_size,
- "report2=%08lX\n",
- (unsigned long) request->user_req.report2);
-
+ "dst_clip_rect : {x=%d, y=%d, width=%d, height=%d}\n",
+ request->user_req.dst_clip_rect.x,
+ request->user_req.dst_clip_rect.y,
+ request->user_req.dst_clip_rect.width,
+ request->user_req.dst_clip_rect.height);
dev_size += sprintf(buf + dev_size,
- "request_id: %d\n",
- request->request_id);
+ "dst_color : 0x%08lX\n\n",
+ (unsigned long) request->user_req.dst_color);
dev_size += sprintf(buf + dev_size,
- "src_resolved.physical: %lX\n",
- (unsigned long) request->src_resolved.
- physical_address);
+ "src_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ physical_address);
dev_size += sprintf(buf + dev_size,
- "src_resolved.virtual: %p\n",
- request->src_resolved.virtual_address);
+ "src_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_resolved.virtual_address);
dev_size += sprintf(buf + dev_size,
- "src_resolved.filep: %p\n",
- request->src_resolved.filep);
+ "src_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_resolved.filep);
dev_size += sprintf(buf + dev_size,
- "src_resolved.filep_physical_start: %lX\n",
- (unsigned long) request->src_resolved.
- file_physical_start);
+ "src_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.
+ file_physical_start);
dev_size += sprintf(buf + dev_size,
- "src_resolved.filep_virtual_start: %p\n",
- (void *) request->src_resolved.file_virtual_start);
+ "src_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_resolved.file_virtual_start);
dev_size += sprintf(buf + dev_size,
- "src_resolved.file_len: %d\n",
- request->src_resolved.file_len);
+ "src_resolved.file_len : %d\n\n",
+ request->src_resolved.file_len);
dev_size += sprintf(buf + dev_size,
- "src_mask_resolved.physical: %lX\n",
- (unsigned long) request->src_mask_resolved.
- physical_address);
+ "src_mask_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ physical_address);
dev_size += sprintf(buf + dev_size,
- "src_mask_resolved.virtual: %p\n",
- request->src_mask_resolved.virtual_address);
+ "src_mask_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.virtual_address);
dev_size += sprintf(buf + dev_size,
- "src_mask_resolved.filep: %p\n",
- request->src_mask_resolved.filep);
+ "src_mask_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.filep);
dev_size += sprintf(buf + dev_size,
- "src_mask_resolved.filep_physical_start: %lX\n",
- (unsigned long) request->src_mask_resolved.
- file_physical_start);
+ "src_mask_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_physical_start);
dev_size += sprintf(buf + dev_size,
- "src_mask_resolved.filep_virtual_start: %p\n",
- (void *) request->src_mask_resolved.
- file_virtual_start);
+ "src_mask_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->src_mask_resolved.
+ file_virtual_start);
dev_size += sprintf(buf + dev_size,
- "src_mask_resolved.file_len: %d\n",
- request->src_mask_resolved.file_len);
+ "src_mask_resolved.file_len : %d\n\n",
+ request->src_mask_resolved.file_len);
dev_size += sprintf(buf + dev_size,
- "dst_resolved.physical: %lX\n",
- (unsigned long) request->dst_resolved.
- physical_address);
+ "dst_resolved.physical : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ physical_address);
dev_size += sprintf(buf + dev_size,
- "dst_resolved.virtual: %p\n",
- request->dst_resolved.virtual_address);
+ "dst_resolved.virtual : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.virtual_address);
dev_size += sprintf(buf + dev_size,
- "dst_resolved.filep: %p\n",
- request->dst_resolved.filep);
+ "dst_resolved.filep : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.filep);
dev_size += sprintf(buf + dev_size,
- "dst_resolved.filep_physical_start: %lX\n",
- (unsigned long) request->dst_resolved.
- file_physical_start);
+ "dst_resolved.filep_physical_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.
+ file_physical_start);
dev_size += sprintf(buf + dev_size,
- "dst_resolved.filep_virtual_start: %p\n",
- (void *) request->dst_resolved.file_virtual_start);
+ "dst_resolved.filep_virtual_start : 0x%08lX\n",
+ (unsigned long) request->dst_resolved.file_virtual_start);
dev_size += sprintf(buf + dev_size,
- "dst_resolved.file_len: %d\n",
- request->dst_resolved.file_len);
+ "dst_resolved.file_len : %d\n\n",
+ request->dst_resolved.file_len);
return dev_size;
}
@@ -3042,13 +2990,14 @@ static int debugfs_b2r2_blt_request_read(struct file *filp, char __user *buf,
size_t dev_size = 0;
int ret = 0;
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
if (Buf == NULL) {
ret = -ENOMEM;
goto out;
}
- dev_size = sprintf_req(&debugfs_latest_request, Buf,
+ dev_size = sprintf_req(&cont->debugfs_latest_request, Buf,
sizeof(char) * 4096);
/* No more to read if offset != 0 */
@@ -3195,38 +3144,39 @@ static int debugfs_b2r2_blt_stat_read(struct file *filp, char __user *buf,
size_t dev_size = 0;
int ret = 0;
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
if (Buf == NULL) {
ret = -ENOMEM;
goto out;
}
- mutex_lock(&stat_lock);
- dev_size += sprintf(Buf + dev_size, "Added jobs: %lu\n",
- stat_n_jobs_added);
- dev_size += sprintf(Buf + dev_size, "Released jobs: %lu\n",
- stat_n_jobs_released);
- dev_size += sprintf(Buf + dev_size, "Jobs in report list: %lu\n",
- stat_n_jobs_in_report_list);
- dev_size += sprintf(Buf + dev_size, "Clients in open: %lu\n",
- stat_n_in_open);
- dev_size += sprintf(Buf + dev_size, "Clients in release: %lu\n",
- stat_n_in_release);
- dev_size += sprintf(Buf + dev_size, "Clients in blt: %lu\n",
- stat_n_in_blt);
- dev_size += sprintf(Buf + dev_size, " synch: %lu\n",
- stat_n_in_blt_synch);
- dev_size += sprintf(Buf + dev_size, " add: %lu\n",
- stat_n_in_blt_add);
- dev_size += sprintf(Buf + dev_size, " wait: %lu\n",
- stat_n_in_blt_wait);
- dev_size += sprintf(Buf + dev_size, "Clients in synch 0: %lu\n",
- stat_n_in_synch_0);
- dev_size += sprintf(Buf + dev_size, "Clients in synch job: %lu\n",
- stat_n_in_synch_job);
- dev_size += sprintf(Buf + dev_size, "Clients in query_cap: %lu\n",
- stat_n_in_query_cap);
- mutex_unlock(&stat_lock);
+ mutex_lock(&cont->stat_lock);
+ dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n",
+ cont->stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Released jobs : %lu\n",
+ cont->stat_n_jobs_released);
+ dev_size += sprintf(Buf + dev_size, "Jobs in report list : %lu\n",
+ cont->stat_n_jobs_in_report_list);
+ dev_size += sprintf(Buf + dev_size, "Clients in open : %lu\n",
+ cont->stat_n_in_open);
+ dev_size += sprintf(Buf + dev_size, "Clients in release : %lu\n",
+ cont->stat_n_in_release);
+ dev_size += sprintf(Buf + dev_size, "Clients in blt : %lu\n",
+ cont->stat_n_in_blt);
+ dev_size += sprintf(Buf + dev_size, " synch : %lu\n",
+ cont->stat_n_in_blt_synch);
+ dev_size += sprintf(Buf + dev_size, " add : %lu\n",
+ cont->stat_n_in_blt_add);
+ dev_size += sprintf(Buf + dev_size, " wait : %lu\n",
+ cont->stat_n_in_blt_wait);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch 0 : %lu\n",
+ cont->stat_n_in_synch_0);
+ dev_size += sprintf(Buf + dev_size, "Clients in synch job : %lu\n",
+ cont->stat_n_in_synch_job);
+ dev_size += sprintf(Buf + dev_size, "Clients in query_cap : %lu\n",
+ cont->stat_n_in_query_cap);
+ mutex_unlock(&cont->stat_lock);
/* No more to read if offset != 0 */
if (*f_pos > dev_size)
@@ -3256,37 +3206,37 @@ static const struct file_operations debugfs_b2r2_blt_stat_fops = {
};
#endif
-static void init_tmp_bufs(void)
+static void init_tmp_bufs(struct b2r2_control *cont)
{
int i = 0;
- for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) {
- tmp_bufs[i].buf.virt_addr = dma_alloc_coherent(
- b2r2_blt_device(), MAX_TMP_BUF_SIZE,
- &tmp_bufs[i].buf.phys_addr, GFP_DMA);
- if (tmp_bufs[i].buf.virt_addr != NULL)
- tmp_bufs[i].buf.size = MAX_TMP_BUF_SIZE;
+ for (i = 0; i < (sizeof(cont->tmp_bufs) / sizeof(struct tmp_buf));
+ i++) {
+ cont->tmp_bufs[i].buf.virt_addr = dma_alloc_coherent(
+ cont->dev, MAX_TMP_BUF_SIZE,
+ &cont->tmp_bufs[i].buf.phys_addr, GFP_DMA);
+ if (cont->tmp_bufs[i].buf.virt_addr != NULL)
+ cont->tmp_bufs[i].buf.size = MAX_TMP_BUF_SIZE;
else {
- b2r2_log_err("%s: Failed to allocate temp buffer %i\n",
- __func__, i);
-
- tmp_bufs[i].buf.size = 0;
+ b2r2_log_err(cont->dev, "%s: Failed to allocate temp "
+ "buffer %i\n", __func__, i);
+ cont->tmp_bufs[i].buf.size = 0;
}
}
}
-static void destroy_tmp_bufs(void)
+static void destroy_tmp_bufs(struct b2r2_control *cont)
{
int i = 0;
for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) {
- if (tmp_bufs[i].buf.size != 0) {
- dma_free_coherent(b2r2_blt_device(),
- tmp_bufs[i].buf.size,
- tmp_bufs[i].buf.virt_addr,
- tmp_bufs[i].buf.phys_addr);
+ if (cont->tmp_bufs[i].buf.size != 0) {
+ dma_free_coherent(cont->dev,
+ cont->tmp_bufs[i].buf.size,
+ cont->tmp_bufs[i].buf.virt_addr,
+ cont->tmp_bufs[i].buf.phys_addr);
- tmp_bufs[i].buf.size = 0;
+ cont->tmp_bufs[i].buf.size = 0;
}
}
}
@@ -3296,105 +3246,116 @@ static void destroy_tmp_bufs(void)
*
* Returns 0 if OK else negative error code
*/
-int b2r2_blt_module_init(void)
+int b2r2_blt_module_init(struct b2r2_control *cont)
{
int ret;
- mutex_init(&stat_lock);
+ mutex_init(&cont->stat_lock);
+
+ /* Register b2r2 driver */
+ cont->miscdev.minor = MISC_DYNAMIC_MINOR;
+ cont->miscdev.name = cont->name;
+ cont->miscdev.fops = &b2r2_blt_fops;
+
+ ret = misc_register(&cont->miscdev);
+ if (ret) {
+ printk(KERN_WARNING "%s: registering misc device fails\n",
+ __func__);
+ goto b2r2_misc_register_fail;
+ }
+
+ cont->dev = cont->miscdev.this_device;
+ dev_set_drvdata(cont->dev, cont);
#ifdef CONFIG_B2R2_GENERIC
/* Initialize generic path */
- b2r2_generic_init();
+ b2r2_generic_init(cont);
#endif
-
/* Initialize node splitter */
- ret = b2r2_node_split_init();
+ ret = b2r2_node_split_init(cont);
if (ret) {
- printk(KERN_WARNING "%s: node split init fails\n",
- __func__);
+ printk(KERN_WARNING "%s: node split init fails\n", __func__);
goto b2r2_node_split_init_fail;
}
- /* Register b2r2 driver */
- ret = misc_register(&b2r2_blt_misc_dev);
- if (ret) {
- printk(KERN_WARNING "%s: registering misc device fails\n",
- __func__);
- goto b2r2_misc_register_fail;
- }
-
- b2r2_blt_misc_dev.this_device->coherent_dma_mask = 0xFFFFFFFF;
- b2r2_blt_dev = &b2r2_blt_misc_dev;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s: device registered\n", __func__);
/*
* FIXME: This stuff should be done before the first requests i.e.
* before misc_register, but they need the device which is not
* available until after misc_register.
*/
- init_tmp_bufs();
+ cont->dev->coherent_dma_mask = 0xFFFFFFFF;
+ init_tmp_bufs(cont);
+ ret = b2r2_filters_init(cont);
+ if (ret) {
+ b2r2_log_warn(cont->dev, "%s: failed to init filters\n",
+ __func__);
+ goto b2r2_filter_init_fail;
+ }
/* Initialize memory allocator */
- ret = b2r2_mem_init(b2r2_blt_device(), B2R2_HEAP_SIZE,
+ ret = b2r2_mem_init(cont, B2R2_HEAP_SIZE,
4, sizeof(struct b2r2_node));
if (ret) {
printk(KERN_WARNING "%s: initializing B2R2 memhandler fails\n",
- __func__);
+ __func__);
goto b2r2_mem_init_fail;
}
#ifdef CONFIG_DEBUG_FS
/* Register debug fs */
- if (!debugfs_root_dir) {
- debugfs_root_dir = debugfs_create_dir("b2r2_blt", NULL);
- debugfs_create_file("latest_request",
- 0666, debugfs_root_dir,
- 0,
- &debugfs_b2r2_blt_request_fops);
- debugfs_create_file("stat",
- 0666, debugfs_root_dir,
- 0,
- &debugfs_b2r2_blt_stat_fops);
+ if (cont->debugfs_root_dir) {
+ debugfs_create_file("last_request", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_request_fops);
+ debugfs_create_file("stats", 0666,
+ cont->debugfs_root_dir,
+ cont, &debugfs_b2r2_blt_stat_fops);
}
#endif
- goto out;
-b2r2_misc_register_fail:
-b2r2_mem_init_fail:
- b2r2_node_split_exit();
+ b2r2_ctl[cont->id] = cont;
+ b2r2_log_info(cont->dev, "%s: done\n", __func__);
+
+ return ret;
+b2r2_mem_init_fail:
+ b2r2_filters_exit(cont);
+b2r2_filter_init_fail:
+ b2r2_node_split_exit(cont);
b2r2_node_split_init_fail:
#ifdef CONFIG_B2R2_GENERIC
- b2r2_generic_exit();
+ b2r2_generic_exit(cont);
#endif
-out:
+ misc_deregister(&cont->miscdev);
+b2r2_misc_register_fail:
return ret;
}
/**
* b2r2_module_exit() - Module exit function
*/
-void b2r2_blt_module_exit(void)
+void b2r2_blt_module_exit(struct b2r2_control *cont)
{
+ if (cont) {
+ b2r2_log_info(cont->dev, "%s\n", __func__);
#ifdef CONFIG_DEBUG_FS
- if (debugfs_root_dir) {
- debugfs_remove_recursive(debugfs_root_dir);
- debugfs_root_dir = NULL;
- }
+ if (cont->debugfs_root_dir) {
+ debugfs_remove_recursive(cont->debugfs_root_dir);
+ cont->debugfs_root_dir = NULL;
+ }
#endif
- if (b2r2_blt_dev) {
- b2r2_log_info("%s\n", __func__);
- b2r2_mem_exit();
- destroy_tmp_bufs();
- b2r2_blt_dev = NULL;
- misc_deregister(&b2r2_blt_misc_dev);
- }
-
- b2r2_node_split_exit();
-
+ b2r2_mem_exit(cont);
+ destroy_tmp_bufs(cont);
+ b2r2_ctl[cont->id] = NULL;
+ misc_deregister(&cont->miscdev);
+ b2r2_node_split_exit(cont);
#if defined(CONFIG_B2R2_GENERIC)
- b2r2_generic_exit();
+ b2r2_generic_exit(cont);
#endif
+ b2r2_filters_exit(cont);
+ }
}
MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>");
diff --git a/drivers/video/b2r2/b2r2_core.c b/drivers/video/b2r2/b2r2_core.c
index 7a11a301d11..629633a7888 100644
--- a/drivers/video/b2r2/b2r2_core.c
+++ b/drivers/video/b2r2/b2r2_core.c
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/err.h>
+#include "b2r2_internal.h"
#include "b2r2_core.h"
#include "b2r2_global.h"
#include "b2r2_structures.h"
@@ -103,6 +104,15 @@
*/
#define B2R2_CORE_HIGHEST_PRIO 20
+/**
+ * B2R2_DOMAIN_DISABLE -
+ */
+#define B2R2_DOMAIN_DISABLE_TIMEOUT (HZ/100)
+
+/**
+ * B2R2_REGULATOR_RETRY_COUNT -
+ */
+#define B2R2_REGULATOR_RETRY_COUNT 10
/**
* B2R2 Hardware defines below
@@ -239,7 +249,8 @@ struct b2r2_core {
u16 min_req_time;
int irq;
- struct device *log_dev;
+ char name[16];
+ struct device *dev;
struct list_head prio_queue;
@@ -267,6 +278,7 @@ struct b2r2_core {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_core_root_dir;
struct dentry *debugfs_regs_dir;
#endif
@@ -291,88 +303,96 @@ struct b2r2_core {
struct clk *b2r2_clock;
struct regulator *b2r2_reg;
+
+ struct b2r2_control *control;
};
/**
- * b2r2_core - Administration data for B2R2 core (singleton)
+ * b2r2_core - Quick link to administration data for B2R2
*/
-static struct b2r2_core b2r2_core;
+static struct b2r2_core *b2r2_core[B2R2_MAX_NBR_DEVICES];
/* Local functions */
-static void check_prio_list(bool atomic);
-static void clear_interrupts(void);
-static void trigger_job(struct b2r2_core_job *job);
-static void exit_job_list(struct list_head *job_list);
-static int get_next_job_id(void);
+static void check_prio_list(struct b2r2_core *core, bool atomic);
+static void clear_interrupts(struct b2r2_core *core);
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job);
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_list);
+static int get_next_job_id(struct b2r2_core *core);
static void job_work_function(struct work_struct *ptr);
static void init_job(struct b2r2_core_job *job);
-static void insert_into_prio_list(struct b2r2_core_job *job);
-static struct b2r2_core_job *find_job_in_list(
- int job_id,
- struct list_head *list);
-static struct b2r2_core_job *find_job_in_active_jobs(int job_id);
-static struct b2r2_core_job *find_tag_in_list(
- int tag,
- struct list_head *list);
-static struct b2r2_core_job *find_tag_in_active_jobs(int tag);
-
-static int domain_enable(void);
-static void domain_disable(void);
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job);
+static struct b2r2_core_job *find_job_in_list(int job_id,
+ struct list_head *list);
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id);
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list);
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag);
+
+static int domain_enable(struct b2r2_core *core);
+static void domain_disable(struct b2r2_core *core);
static void stop_queue(enum b2r2_core_queue queue);
#ifdef HANDLE_TIMEOUTED_JOBS
-static void printk_regs(void);
-static int hw_reset(void);
+static void printk_regs(struct b2r2_core *core);
+static int hw_reset(struct b2r2_core *core);
static void timeout_work_function(struct work_struct *ptr);
#endif
static void reset_hw_timer(struct b2r2_core_job *job);
static void start_hw_timer(struct b2r2_core_job *job);
-static void stop_hw_timer(struct b2r2_core_job *job);
+static void stop_hw_timer(struct b2r2_core *core,
+ struct b2r2_core_job *job);
-static int init_hw(void);
-static void exit_hw(void);
+static int init_hw(struct b2r2_core *core);
+static void exit_hw(struct b2r2_core *core);
/* Tracking release bug... */
#ifdef DEBUG_CHECK_ADDREF_RELEASE
/**
* ar_add() - Adds an addref or a release to the array
*
+ * @core: The b2r2 core entity
* @job: The job that has been referenced
* @caller: The caller of addref / release
* @addref: true if it is an addref else false for release
*/
-void ar_add(struct b2r2_core_job *job, const char *caller, bool addref)
+static void ar_add(struct b2r2_core *core, struct b2r2_core_job *job,
+ const char *caller, bool addref)
{
- b2r2_core.ar[b2r2_core.ar_write].addref = addref;
- b2r2_core.ar[b2r2_core.ar_write].job = job;
- b2r2_core.ar[b2r2_core.ar_write].caller = caller;
- b2r2_core.ar[b2r2_core.ar_write].ref_count = job->ref_count;
- b2r2_core.ar_write = (b2r2_core.ar_write + 1) %
- ARRAY_SIZE(b2r2_core.ar);
- if (b2r2_core.ar_write == b2r2_core.ar_read)
- b2r2_core.ar_read = (b2r2_core.ar_read + 1) %
- ARRAY_SIZE(b2r2_core.ar);
+ core->ar[core->ar_write].addref = addref;
+ core->ar[core->ar_write].job = job;
+ core->ar[core->ar_write].caller = caller;
+ core->ar[core->ar_write].ref_count = job->ref_count;
+ core->ar_write = (core->ar_write + 1) %
+ ARRAY_SIZE(core->ar);
+ if (core->ar_write == core->ar_read)
+ core->ar_read = (core->ar_read + 1) %
+ ARRAY_SIZE(core->ar);
}
/**
* sprintf_ar() - Writes all addref / release to a string buffer
*
+ * @core: The b2r2 core entity
* @buf: Receiving character bufefr
* @job: Which job to write or NULL for all
*
* NOTE! No buffer size check!!
*/
-char *sprintf_ar(char *buf, struct b2r2_core_job *job)
+static char *sprintf_ar(struct b2r2_core *core, char *buf,
+ struct b2r2_core_job *job)
{
int i;
int size = 0;
- for (i = b2r2_core.ar_read;
- i != b2r2_core.ar_write;
- i = (i + 1) % ARRAY_SIZE(b2r2_core.ar)) {
- struct addref_release *ar = &b2r2_core.ar[i];
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
if (!job || job == ar->job)
size += sprintf(buf + size,
"%s on %p from %s, ref = %d\n",
@@ -386,21 +406,21 @@ char *sprintf_ar(char *buf, struct b2r2_core_job *job)
/**
* printk_ar() - Writes all addref / release using dev_info
*
+ * @core: The b2r2 core entity
* @job: Which job to write or NULL for all
*/
-void printk_ar(struct b2r2_core_job *job)
+static void printk_ar(struct b2r2_core *core, struct b2r2_core_job *job)
{
int i;
- for (i = b2r2_core.ar_read;
- i != b2r2_core.ar_write;
- i = (i + 1) % ARRAY_SIZE(b2r2_core.ar)) {
- struct addref_release *ar = &b2r2_core.ar[i];
+ for (i = core->ar_read; i != core->ar_write;
+ i = (i + 1) % ARRAY_SIZE(core->ar)) {
+ struct addref_release *ar = &core->ar[i];
if (!job || job == ar->job)
- b2r2_log_info("%s on %p from %s,"
- " ref = %d\n",
- ar->addref ? "addref" : "release",
- ar->job, ar->caller, ar->ref_count);
+ b2r2_log_info(core->dev, "%s on %p from %s,"
+ " ref = %d\n",
+ ar->addref ? "addref" : "release",
+ ar->job, ar->caller, ar->ref_count);
}
}
#endif
@@ -408,32 +428,34 @@ void printk_ar(struct b2r2_core_job *job)
/**
* internal_job_addref() - Increments the reference count for a job
*
+ * @core: The b2r2 core entity
* @job: Which job to increment reference count for
* @caller: Name of function calling addref (for debug)
*
- * Note that b2r2_core.lock _must_ be held
+ * Note that core->lock _must_ be held
*/
-static void internal_job_addref(struct b2r2_core_job *job, const char *caller)
+static void internal_job_addref(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
{
u32 ref_count;
- b2r2_log_info("%s (%p) (from %s)\n",
- __func__, job, caller);
+ b2r2_log_info(core->dev, "%s (%p, %p) (from %s)\n",
+ __func__, core, job, caller);
/* Sanity checks */
BUG_ON(job == NULL);
if (job->start_sentinel != START_SENTINEL ||
- job->end_sentinel != END_SENTINEL ||
- job->ref_count == 0 || job->ref_count > 10) {
- b2r2_log_info(
- "%s: (%p) start=%X end=%X ref_count=%d\n",
- __func__, job, job->start_sentinel,
- job->end_sentinel, job->ref_count);
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (%p, %p) start=%X end=%X "
+ "ref_count=%d\n", __func__, core, job,
+ job->start_sentinel, job->end_sentinel,
+ job->ref_count);
/* Something is wrong, print the addref / release array */
#ifdef DEBUG_CHECK_ADDREF_RELEASE
- printk_ar(NULL);
+ printk_ar(core, NULL);
#endif
}
@@ -446,61 +468,61 @@ static void internal_job_addref(struct b2r2_core_job *job, const char *caller)
#ifdef DEBUG_CHECK_ADDREF_RELEASE
/* Keep track of addref / release */
- ar_add(job, caller, true);
+ ar_add(core, job, caller, true);
#endif
- b2r2_log_info("%s called from %s (%p): Ref Count is %d\n",
- __func__, caller, job, job->ref_count);
+ b2r2_log_info(core->dev, "%s called from %s (%p, %p): Ref Count is "
+ "%d\n", __func__, caller, core, job, job->ref_count);
}
/**
* internal_job_release() - Decrements the reference count for a job
*
+ * @core: The b2r2 core entity
* @job: Which job to decrement reference count for
* @caller: Name of function calling release (for debug)
*
* Returns true if job_release should be called by caller
* (reference count reached zero).
*
- * Note that b2r2_core.lock _must_ be held
+ * Note that core->lock _must_ be held
*/
-bool internal_job_release(struct b2r2_core_job *job, const char *caller)
+static bool internal_job_release(struct b2r2_core *core,
+ struct b2r2_core_job *job, const char *caller)
{
u32 ref_count;
bool call_release = false;
- b2r2_log_info("%s (%p) (from %s)\n",
- __func__, job, caller);
-
/* Sanity checks */
BUG_ON(job == NULL);
+ b2r2_log_info(core->dev, "%s (%p, %p) (from %s)\n",
+ __func__, core, job, caller);
+
if (job->start_sentinel != START_SENTINEL ||
- job->end_sentinel != END_SENTINEL ||
- job->ref_count == 0 || job->ref_count > 10) {
- b2r2_log_info(
- "%s: (%p) start=%X end=%X ref_count=%d\n",
- __func__, job, job->start_sentinel,
- job->end_sentinel, job->ref_count);
+ job->end_sentinel != END_SENTINEL ||
+ job->ref_count == 0 || job->ref_count > 10) {
+ b2r2_log_info(core->dev, "%s: (%p, %p) start=%X end=%X "
+ "ref_count=%d\n", __func__, core, job,
+ job->start_sentinel, job->end_sentinel,
+ job->ref_count);
#ifdef DEBUG_CHECK_ADDREF_RELEASE
- printk_ar(NULL);
+ printk_ar(core, NULL);
#endif
}
-
BUG_ON(job->start_sentinel != START_SENTINEL);
BUG_ON(job->end_sentinel != END_SENTINEL);
-
BUG_ON(job->ref_count == 0 || job->ref_count > 10);
/* Do the actual decrement */
ref_count = --job->ref_count;
#ifdef DEBUG_CHECK_ADDREF_RELEASE
- ar_add(job, caller, false);
+ ar_add(core, job, caller, false);
#endif
- b2r2_log_info("%s called from %s (%p) Ref Count is %d\n",
- __func__, caller, job, ref_count);
+ b2r2_log_info(core->dev, "%s called from %s (%p, %p) Ref Count is "
+ "%d\n", __func__, caller, core, job, ref_count);
if (!ref_count && job->release) {
call_release = true;
@@ -515,40 +537,60 @@ bool internal_job_release(struct b2r2_core_job *job, const char *caller)
/* Exported functions */
-/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller)
{
unsigned long flags;
- spin_lock_irqsave(&b2r2_core.lock, flags);
- internal_job_addref(job, caller);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ internal_job_addref(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
}
-/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller)
{
unsigned long flags;
bool call_release = false;
- spin_lock_irqsave(&b2r2_core.lock, flags);
- call_release = internal_job_release(job, caller);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ spin_lock_irqsave(&core->lock, flags);
+ call_release = internal_job_release(core, job, caller);
+ spin_unlock_irqrestore(&core->lock, flags);
if (call_release)
job->release(job);
}
-/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
-int b2r2_core_job_add(struct b2r2_core_job *job)
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job)
{
unsigned long flags;
+ struct b2r2_core *core = control->data;
- b2r2_log_info("%s (%p)\n", __func__, job);
+ b2r2_log_info(core->dev, "%s (%p, %p)\n", __func__, control, job);
/* Enable B2R2 */
- domain_enable();
+ domain_enable(core);
- spin_lock_irqsave(&b2r2_core.lock, flags);
- b2r2_core.stat_n_jobs_added++;
+ spin_lock_irqsave(&core->lock, flags);
+ core->stat_n_jobs_added++;
/* Initialise internal job data */
init_job(job);
@@ -557,51 +599,59 @@ int b2r2_core_job_add(struct b2r2_core_job *job)
job->ref_count = 1;
/* Insert job into prio list */
- insert_into_prio_list(job);
+ insert_into_prio_list(core, job);
/* Check if we can dispatch job */
- check_prio_list(false);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ check_prio_list(core, false);
+ spin_unlock_irqrestore(&core->lock, flags);
return 0;
}
-/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
-struct b2r2_core_job *b2r2_core_job_find(int job_id)
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id)
{
unsigned long flags;
struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
- b2r2_log_info("%s (%d)\n", __func__, job_id);
+ b2r2_log_info(core->dev, "%s (%p, %d)\n", __func__, control, job_id);
- spin_lock_irqsave(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
/* Look through prio queue */
- job = find_job_in_list(job_id, &b2r2_core.prio_queue);
+ job = find_job_in_list(job_id, &core->prio_queue);
if (!job)
- job = find_job_in_active_jobs(job_id);
+ job = find_job_in_active_jobs(core, job_id);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_unlock_irqrestore(&core->lock, flags);
return job;
}
-/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
-struct b2r2_core_job *b2r2_core_job_find_first_with_tag(int tag)
+/**
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag)
{
unsigned long flags;
struct b2r2_core_job *job;
+ struct b2r2_core *core = control->data;
- b2r2_log_info("%s (%d)\n", __func__, tag);
+ b2r2_log_info(core->dev, "%s (%p, %d)\n", __func__, control, tag);
- spin_lock_irqsave(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
/* Look through prio queue */
- job = find_tag_in_list(tag, &b2r2_core.prio_queue);
+ job = find_tag_in_list(core, tag, &core->prio_queue);
if (!job)
- job = find_tag_in_active_jobs(tag);
+ job = find_tag_in_active_jobs(core, tag);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_unlock_irqrestore(&core->lock, flags);
return job;
}
@@ -613,32 +663,48 @@ struct b2r2_core_job *b2r2_core_job_find_first_with_tag(int tag)
*
* Returns true if job is done or cancelled
*
- * b2r2_core.lock must _NOT_ be held when calling this function
+ * core->lock must _NOT_ be held when calling this function
*/
static bool is_job_done(struct b2r2_core_job *job)
{
unsigned long flags;
bool job_is_done;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
- spin_lock_irqsave(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
job_is_done =
job->job_state != B2R2_CORE_JOB_QUEUED &&
job->job_state != B2R2_CORE_JOB_RUNNING;
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_unlock_irqrestore(&core->lock, flags);
return job_is_done;
}
-/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+/**
+ * b2r2_core_job_wait()
+ *
+ * @job: The job to wait for completion of
+ *
+ * core->lock _must_ _NOT_ be held when calling this function
+ */
int b2r2_core_job_wait(struct b2r2_core_job *job)
{
int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
- b2r2_log_info("%s (%p)\n", __func__, job);
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
+ b2r2_log_info(core->dev, "%s (%p)\n", __func__, job);
/* Check that we have the job */
if (job->job_state == B2R2_CORE_JOB_IDLE) {
/* Never or not queued */
- b2r2_log_info("%s: Job not queued\n", __func__);
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
return -ENOENT;
}
@@ -648,9 +714,9 @@ int b2r2_core_job_wait(struct b2r2_core_job *job)
is_job_done(job));
if (ret)
- b2r2_log_warn(
- "%s: wait_event_interruptible returns %d, state is %d",
- __func__, ret, job->job_state);
+ b2r2_log_warn(core->dev,
+ "%s: wait_event_interruptible returns %d state is %d",
+ __func__, ret, job->job_state);
return ret;
}
@@ -662,9 +728,9 @@ int b2r2_core_job_wait(struct b2r2_core_job *job)
*
* Returns true if the job was found and cancelled
*
- * b2r2_core.lock must be held when calling this function
+ * core->lock must be held when calling this function
*/
-static bool cancel_job(struct b2r2_core_job *job)
+static bool cancel_job(struct b2r2_core *core, struct b2r2_core_job *job)
{
bool found_job = false;
bool job_was_active = false;
@@ -676,152 +742,168 @@ static bool cancel_job(struct b2r2_core_job *job)
}
/* Remove from active jobs */
- if (!found_job) {
- if (b2r2_core.n_active_jobs > 0) {
- int i;
+ if (!found_job && core->n_active_jobs > 0) {
+ int i;
- /* Look for timeout:ed jobs and put them in tmp list */
- for (i = 0;
- i < ARRAY_SIZE(b2r2_core.active_jobs);
- i++) {
- if (b2r2_core.active_jobs[i] == job) {
- stop_queue((enum b2r2_core_queue)i);
- stop_hw_timer(job);
- b2r2_core.active_jobs[i] = NULL;
- b2r2_core.n_active_jobs--;
- found_job = true;
- job_was_active = true;
- }
+ /* Look for timeout:ed jobs and put them in tmp list */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ if (core->active_jobs[i] == job) {
+ stop_queue((enum b2r2_core_queue)i);
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ found_job = true;
+ job_was_active = true;
}
}
}
-
/* Handle done list & callback */
if (found_job) {
/* Job is canceled */
job->job_state = B2R2_CORE_JOB_CANCELED;
- queue_work(b2r2_core.work_queue, &job->work);
+ queue_work(core->work_queue, &job->work);
/* Statistics */
if (!job_was_active)
- b2r2_core.stat_n_jobs_in_prio_list--;
+ core->stat_n_jobs_in_prio_list--;
}
return found_job;
}
-/* b2r2_core.lock _must_ _NOT_ be held when calling this function */
+/* core->lock _must_ _NOT_ be held when calling this function */
int b2r2_core_job_cancel(struct b2r2_core_job *job)
{
unsigned long flags;
int ret = 0;
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
- b2r2_log_info("%s (%p) (%d)\n", __func__,
- job, job->job_state);
+ b2r2_log_info(core->dev, "%s (%p) (%d)\n",
+ __func__, job, job->job_state);
/* Check that we have the job */
if (job->job_state == B2R2_CORE_JOB_IDLE) {
/* Never or not queued */
- b2r2_log_info("%s: Job not queued\n", __func__);
+ b2r2_log_info(core->dev, "%s: Job not queued\n", __func__);
return -ENOENT;
}
/* Remove from prio list */
- spin_lock_irqsave(&b2r2_core.lock, flags);
- cancel_job(job);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
+ cancel_job(core, job);
+ spin_unlock_irqrestore(&core->lock, flags);
return ret;
}
/* LOCAL FUNCTIONS BELOW */
-#define B2R2_DOMAIN_DISABLE_TIMEOUT (HZ/100)
-
+/**
+ * domain_disable_work_function()
+ *
+ * @work: The work struct embedded in the b2r2 core's delayed domain disable work
+ */
static void domain_disable_work_function(struct work_struct *work)
{
- if (!mutex_trylock(&b2r2_core.domain_lock))
+ struct delayed_work *twork = to_delayed_work(work);
+ struct b2r2_core *core = container_of(
+ twork, struct b2r2_core, domain_disable_work);
+
+ if (!mutex_trylock(&core->domain_lock))
return;
- if (b2r2_core.domain_request_count == 0) {
- exit_hw();
- clk_disable(b2r2_core.b2r2_clock);
- regulator_disable(b2r2_core.b2r2_reg);
- b2r2_core.domain_enabled = false;
+ if (core->domain_request_count == 0) {
+ exit_hw(core);
+ clk_disable(core->b2r2_clock);
+ regulator_disable(core->b2r2_reg);
+ core->domain_enabled = false;
}
- mutex_unlock(&b2r2_core.domain_lock);
+ mutex_unlock(&core->domain_lock);
}
-#define B2R2_REGULATOR_RETRY_COUNT 10
-
-static int domain_enable(void)
+/**
+ * domain_enable()
+ *
+ * @core: The b2r2 core entity
+ */
+static int domain_enable(struct b2r2_core *core)
{
- mutex_lock(&b2r2_core.domain_lock);
- b2r2_core.domain_request_count++;
+ mutex_lock(&core->domain_lock);
+ core->domain_request_count++;
- if (!b2r2_core.domain_enabled) {
+ if (!core->domain_enabled) {
int retry = 0;
int ret;
-
again:
/*
* Since regulator_enable() may sleep we have to handle
* interrupts.
*/
- ret = regulator_enable(b2r2_core.b2r2_reg);
+ ret = regulator_enable(core->b2r2_reg);
if ((ret == -EAGAIN) &&
((retry++) < B2R2_REGULATOR_RETRY_COUNT))
goto again;
else if (ret < 0)
goto regulator_enable_failed;
- clk_enable(b2r2_core.b2r2_clock);
- if (init_hw() < 0)
+ clk_enable(core->b2r2_clock);
+ if (init_hw(core) < 0)
goto init_hw_failed;
- b2r2_core.domain_enabled = true;
+ core->domain_enabled = true;
}
- mutex_unlock(&b2r2_core.domain_lock);
+ mutex_unlock(&core->domain_lock);
return 0;
init_hw_failed:
- b2r2_log_err("%s: Could not initialize hardware!\n", __func__);
+ b2r2_log_err(core->dev,
+ "%s: Could not initialize hardware!\n", __func__);
- clk_disable(b2r2_core.b2r2_clock);
+ clk_disable(core->b2r2_clock);
- if (regulator_disable(b2r2_core.b2r2_reg) < 0)
- b2r2_log_err("%s: regulator_disable failed!\n", __func__);
+ if (regulator_disable(core->b2r2_reg) < 0)
+ b2r2_log_err(core->dev, "%s: regulator_disable failed!\n",
+ __func__);
regulator_enable_failed:
- b2r2_core.domain_request_count--;
- mutex_unlock(&b2r2_core.domain_lock);
+ core->domain_request_count--;
+ mutex_unlock(&core->domain_lock);
return -EFAULT;
}
-static void domain_disable(void)
+/**
+ * domain_disable()
+ *
+ * @core: The b2r2 core entity
+ */
+static void domain_disable(struct b2r2_core *core)
{
- mutex_lock(&b2r2_core.domain_lock);
+ mutex_lock(&core->domain_lock);
- if (b2r2_core.domain_request_count == 0) {
- b2r2_log_err("%s: Unbalanced domain_disable()\n", __func__);
+ if (core->domain_request_count == 0) {
+ b2r2_log_err(core->dev,
+ "%s: Unbalanced domain_disable()\n", __func__);
} else {
- b2r2_core.domain_request_count--;
+ core->domain_request_count--;
/* Cancel any existing work */
- cancel_delayed_work_sync(&b2r2_core.domain_disable_work);
+ cancel_delayed_work_sync(&core->domain_disable_work);
/* Add a work to disable the power and clock after a delay */
- queue_delayed_work(b2r2_core.work_queue,
- &b2r2_core.domain_disable_work,
+ queue_delayed_work(core->work_queue, &core->domain_disable_work,
B2R2_DOMAIN_DISABLE_TIMEOUT);
}
- mutex_unlock(&b2r2_core.domain_lock);
+ mutex_unlock(&core->domain_lock);
}
/**
@@ -829,47 +911,52 @@ static void domain_disable(void)
*/
static void stop_queue(enum b2r2_core_queue queue)
{
- /* TODO: Implement! If this function is not implemented canceled jobs will
- use b2r2 which is a waste of resources. Not stopping jobs will also screw up
- the hardware timing, the job the canceled job intrerrupted (if any) will be
- billed for the time between the point where the job is cancelled and when it
- stops. */
+ /* TODO: Implement! If this function is not implemented canceled jobs
+ * will use b2r2 which is a waste of resources. Not stopping jobs will
+ * also screw up the hardware timing, the job the canceled job
+ * interrupted (if any) will be billed for the time between the point
+ * where the job is cancelled and when it stops. */
}
/**
* exit_job_list() - Empties a job queue by canceling the jobs
*
- * b2r2_core.lock _must_ be held when calling this function
+ * @core: The b2r2 core entity
+ *
+ * core->lock _must_ be held when calling this function
*/
-static void exit_job_list(struct list_head *job_queue)
+static void exit_job_list(struct b2r2_core *core,
+ struct list_head *job_queue)
{
while (!list_empty(job_queue)) {
struct b2r2_core_job *job =
list_entry(job_queue->next,
- struct b2r2_core_job,
- list);
+ struct b2r2_core_job,
+ list);
/* Add reference to prevent job from disappearing
in the middle of our work, released below */
- internal_job_addref(job, __func__);
+ internal_job_addref(core, job, __func__);
- cancel_job(job);
+ cancel_job(core, job);
/* Matching release to addref above */
- internal_job_release(job, __func__);
+ internal_job_release(core, job, __func__);
}
}
/**
* get_next_job_id() - Return a new job id.
+ *
+ * @core: The b2r2 core entity
*/
-static int get_next_job_id(void)
+static int get_next_job_id(struct b2r2_core *core)
{
int job_id;
- if (b2r2_core.next_job_id < 1)
- b2r2_core.next_job_id = 1;
- job_id = b2r2_core.next_job_id++;
+ if (core->next_job_id < 1)
+ core->next_job_id = 1;
+ job_id = core->next_job_id++;
return job_id;
}
@@ -883,22 +970,27 @@ static int get_next_job_id(void)
static void job_work_function(struct work_struct *ptr)
{
unsigned long flags;
- struct b2r2_core_job *job = container_of(
- ptr, struct b2r2_core_job, work);
+ struct b2r2_core_job *job =
+ container_of(ptr, struct b2r2_core_job, work);
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
/* Disable B2R2 */
- domain_disable();
+ domain_disable(core);
/* Release resources */
if (job->release_resources)
job->release_resources(job, false);
- spin_lock_irqsave(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
/* Dispatch a new job if possible */
- check_prio_list(false);
+ check_prio_list(core, false);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_unlock_irqrestore(&core->lock, flags);
/* Tell the client */
if (job->callback)
@@ -922,53 +1014,52 @@ static void timeout_work_function(struct work_struct *ptr)
{
unsigned long flags;
struct list_head job_list;
+ struct delayed_work *twork = to_delayed_work(ptr);
+ struct b2r2_core *core = container_of(twork, struct b2r2_core,
+ timeout_work);
INIT_LIST_HEAD(&job_list);
/* Cancel all jobs if too long time since last irq */
- spin_lock_irqsave(&b2r2_core.lock, flags);
- if (b2r2_core.n_active_jobs > 0) {
+ spin_lock_irqsave(&core->lock, flags);
+ if (core->n_active_jobs > 0) {
unsigned long diff =
- (long) jiffies - (long) b2r2_core.jiffies_last_irq;
+ (long) jiffies - (long) core->jiffies_last_irq;
if (diff > HZ/2) {
/* Active jobs and more than a second since last irq! */
int i;
- /* Look for timeout:ed jobs and put them in tmp list. It's
- important that the application queues are killed in order
- of decreasing priority */
- for (i = 0;
- i < ARRAY_SIZE(b2r2_core.active_jobs);
- i++) {
+ /* Look for timeout:ed jobs and put them in tmp list.
+ * It's important that the application queues are
+ * killed in order of decreasing priority */
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
struct b2r2_core_job *job =
- b2r2_core.active_jobs[i];
+ core->active_jobs[i];
if (job) {
- stop_hw_timer(job);
-
- b2r2_core.active_jobs[i] = NULL;
- b2r2_core.n_active_jobs--;
- list_add_tail(&job->list,
- &job_list);
+ stop_hw_timer(core, job);
+ core->active_jobs[i] = NULL;
+ core->n_active_jobs--;
+ list_add_tail(&job->list, &job_list);
}
}
/* Print the B2R2 register and reset B2R2 */
- printk_regs();
- hw_reset();
+ printk_regs(core);
+ hw_reset(core);
}
}
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_unlock_irqrestore(&core->lock, flags);
/* Handle timeout:ed jobs */
- spin_lock_irqsave(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
while (!list_empty(&job_list)) {
struct b2r2_core_job *job =
list_entry(job_list.next,
- struct b2r2_core_job,
- list);
+ struct b2r2_core_job,
+ list);
- b2r2_log_warn("%s: Job timeout\n", __func__);
+ b2r2_log_warn(core->dev, "%s: Job timeout\n", __func__);
list_del_init(&job->list);
@@ -979,16 +1070,16 @@ static void timeout_work_function(struct work_struct *ptr)
wake_up_interruptible(&job->event);
/* Job callbacks handled via work queue */
- queue_work(b2r2_core.work_queue, &job->work);
+ queue_work(core->work_queue, &job->work);
}
/* Requeue delayed work */
- if (b2r2_core.n_active_jobs)
+ if (core->n_active_jobs)
queue_delayed_work(
- b2r2_core.work_queue,
- &b2r2_core.timeout_work, HZ/2);
+ core->work_queue,
+ &core->timeout_work, HZ/2);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_unlock_irqrestore(&core->lock, flags);
}
#endif
@@ -998,7 +1089,7 @@ static void timeout_work_function(struct work_struct *ptr)
*
* @job: Pointer to job struct
*
- * b2r2_core.lock _must_ be held when calling this function
+ * core->lock _must_ be held when calling this function
*/
static void reset_hw_timer(struct b2r2_core_job *job)
{
@@ -1012,7 +1103,7 @@ static void reset_hw_timer(struct b2r2_core_job *job)
*
* @job: Pointer to job struct
*
- * b2r2_core.lock _must_ be held when calling this function
+ * core->lock _must_ be held when calling this function
*/
static void start_hw_timer(struct b2r2_core_job *job)
{
@@ -1024,11 +1115,12 @@ static void start_hw_timer(struct b2r2_core_job *job)
* Should be called immediatly after the hardware has
* finished.
*
+ * @core: The b2r2 core entity
* @job: Pointer to job struct
*
- * b2r2_core.lock _must_ be held when calling this function
+ * core->lock _must_ be held when calling this function
*/
-static void stop_hw_timer(struct b2r2_core_job *job)
+static void stop_hw_timer(struct b2r2_core *core, struct b2r2_core_job *job)
{
/* Assumes only app queues are used, which is the case right now. */
/* Not 100% accurate. When a higher prio job interrupts a lower prio job it does
@@ -1058,7 +1150,7 @@ static void stop_hw_timer(struct b2r2_core_job *job)
/* Check if we have delayed the start of higher prio jobs. Can happen as queue
switching only can be done between nodes. */
for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--) {
- struct b2r2_core_job *queue_active_job = b2r2_core.active_jobs[i];
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
if (NULL == queue_active_job)
continue;
@@ -1067,17 +1159,21 @@ static void stop_hw_timer(struct b2r2_core_job *job)
/* Check if the job has stolen time from lower prio jobs */
for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++) {
- struct b2r2_core_job *queue_active_job = b2r2_core.active_jobs[i];
+ struct b2r2_core_job *queue_active_job = core->active_jobs[i];
u32 queue_active_job_hw_start_time;
if (NULL == queue_active_job)
continue;
- queue_active_job_hw_start_time = queue_active_job->hw_start_time + time_pos_offset;
+ queue_active_job_hw_start_time =
+ queue_active_job->hw_start_time +
+ time_pos_offset;
if (queue_active_job_hw_start_time < stop_time) {
- u32 queue_active_job_nsec_in_hw = stop_time - queue_active_job_hw_start_time;
- u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw, nsec_in_hw);
+ u32 queue_active_job_nsec_in_hw = stop_time -
+ queue_active_job_hw_start_time;
+ u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw,
+ nsec_in_hw);
queue_active_job->nsec_active_in_hw -= (s32)num_stolen_nsec;
@@ -1098,11 +1194,17 @@ static void stop_hw_timer(struct b2r2_core_job *job)
*/
static void init_job(struct b2r2_core_job *job)
{
+ struct b2r2_blt_instance *instance;
+ struct b2r2_core *core;
+
+ instance = (struct b2r2_blt_instance *) job->tag;
+ core = instance->control->data;
+
job->start_sentinel = START_SENTINEL;
job->end_sentinel = END_SENTINEL;
/* Get a job id*/
- job->job_id = get_next_job_id();
+ job->job_id = get_next_job_id(core);
/* Job is idle, never queued */
job->job_state = B2R2_CORE_JOB_IDLE;
@@ -1144,62 +1246,60 @@ static void init_job(struct b2r2_core_job *job)
/**
* clear_interrupts() - Disables all interrupts
*
- * b2r2_core.lock must be held
+ * core->lock _must_ be held
*/
-static void clear_interrupts(void)
+static void clear_interrupts(struct b2r2_core *core)
{
- writel(0x0, &b2r2_core.hw->BLT_ITM0);
- writel(0x0, &b2r2_core.hw->BLT_ITM1);
- writel(0x0, &b2r2_core.hw->BLT_ITM2);
- writel(0x0, &b2r2_core.hw->BLT_ITM3);
+ writel(0x0, &core->hw->BLT_ITM0);
+ writel(0x0, &core->hw->BLT_ITM1);
+ writel(0x0, &core->hw->BLT_ITM2);
+ writel(0x0, &core->hw->BLT_ITM3);
}
/**
* insert_into_prio_list() - Inserts the job into the sorted list of jobs.
* The list is sorted by priority.
*
+ * @core: The b2r2 core entity
* @job: Job to insert
*
- * b2r2_core.lock must be held
+ * core->lock _must_ be held
*/
-static void insert_into_prio_list(struct b2r2_core_job *job)
+static void insert_into_prio_list(struct b2r2_core *core,
+ struct b2r2_core_job *job)
{
/* Ref count is increased when job put in list,
should be released when job is removed from list */
- internal_job_addref(job, __func__);
+ internal_job_addref(core, job, __func__);
- b2r2_core.stat_n_jobs_in_prio_list++;
+ core->stat_n_jobs_in_prio_list++;
/* Sort in the job */
- if (list_empty(&b2r2_core.prio_queue))
- list_add_tail(&job->list, &b2r2_core.prio_queue);
+ if (list_empty(&core->prio_queue))
+ list_add_tail(&job->list, &core->prio_queue);
else {
- struct b2r2_core_job *first_job =
- list_entry(b2r2_core.prio_queue.next,
+ struct b2r2_core_job *first_job = list_entry(
+ core->prio_queue.next,
struct b2r2_core_job, list);
- struct b2r2_core_job *last_job =
- list_entry(b2r2_core.prio_queue.prev,
+ struct b2r2_core_job *last_job = list_entry(
+ core->prio_queue.prev,
struct b2r2_core_job, list);
- /* High prio job? */
if (job->prio > first_job->prio)
- /* Insert first */
- list_add(&job->list, &b2r2_core.prio_queue);
+ list_add(&job->list, &core->prio_queue);
else if (job->prio <= last_job->prio)
- /* Insert last */
- list_add_tail(&job->list, &b2r2_core.prio_queue);
+ list_add_tail(&job->list, &core->prio_queue);
else {
/* We need to find where to put it */
struct list_head *ptr;
- list_for_each(ptr, &b2r2_core.prio_queue) {
+ list_for_each(ptr, &core->prio_queue) {
struct b2r2_core_job *list_job =
list_entry(ptr, struct b2r2_core_job,
- list);
+ list);
if (job->prio > list_job->prio) {
- /* Add before */
list_add_tail(&job->list,
- &list_job->list);
+ &list_job->list);
break;
}
}
@@ -1213,71 +1313,67 @@ static void insert_into_prio_list(struct b2r2_core_job *job)
* check_prio_list() - Checks if the first job(s) in the prio list can
* be dispatched to B2R2
*
+ * @core: The b2r2 core entity
* @atomic: true if in atomic context (i.e. interrupt context)
*
- * b2r2_core.lock must be held
+ * core->lock _must_ be held
*/
-static void check_prio_list(bool atomic)
+static void check_prio_list(struct b2r2_core *core, bool atomic)
{
bool dispatched_job;
int n_dispatched = 0;
+ struct b2r2_core_job *job;
- /* Do we have anything in our prio list? */
do {
dispatched_job = false;
- if (!list_empty(&b2r2_core.prio_queue)) {
- /* The first job waiting */
- struct b2r2_core_job *job =
- list_first_entry(&b2r2_core.prio_queue,
- struct b2r2_core_job,
- list);
-
- /* Is the B2R2 queue available? */
- if (b2r2_core.active_jobs[job->queue] == NULL) {
- /* Can we acquire resources? */
- if (!job->acquire_resources ||
- job->acquire_resources(job, atomic) == 0) {
- /* Ok to dispatch job */
-
- /* Remove from list */
- list_del_init(&job->list);
-
- /* The job is now active */
- b2r2_core.active_jobs[job->queue] = job;
- b2r2_core.n_active_jobs++;
- job->jiffies = jiffies;
- b2r2_core.jiffies_last_active =
- jiffies;
-
- /* Kick off B2R2 */
- trigger_job(job);
-
- dispatched_job = true;
- n_dispatched++;
+
+ /* Do we have anything in our prio list? */
+ if (list_empty(&core->prio_queue))
+ break;
+
+ /* The first job waiting */
+ job = list_first_entry(&core->prio_queue,
+ struct b2r2_core_job, list);
+
+ /* Is the B2R2 queue available? */
+ if (core->active_jobs[job->queue] != NULL)
+ break;
+
+ /* Can we acquire resources? */
+ if (!job->acquire_resources ||
+ job->acquire_resources(job, atomic) == 0) {
+ /* Ok to dispatch job */
+
+ /* Remove from list */
+ list_del_init(&job->list);
+
+ /* The job is now active */
+ core->active_jobs[job->queue] = job;
+ core->n_active_jobs++;
+ job->jiffies = jiffies;
+ core->jiffies_last_active = jiffies;
+
+ /* Kick off B2R2 */
+ trigger_job(core, job);
+ dispatched_job = true;
+ n_dispatched++;
#ifdef HANDLE_TIMEOUTED_JOBS
- /* Check in one half second
- if it hangs */
- queue_delayed_work(
- b2r2_core.work_queue,
- &b2r2_core.timeout_work,
- HZ/2);
+ /* Check in one half second if it hangs */
+ queue_delayed_work(core->work_queue,
+ &core->timeout_work, HZ/2);
#endif
- } else {
- /* No resources */
- if (!atomic &&
- b2r2_core.n_active_jobs == 0) {
- b2r2_log_warn(
- "%s: No resource",
- __func__);
- cancel_job(job);
- }
- }
+ } else {
+ /* No resources */
+ if (!atomic && core->n_active_jobs == 0) {
+ b2r2_log_warn(core->dev,
+ "%s: No resource", __func__);
+ cancel_job(core, job);
}
}
} while (dispatched_job);
- b2r2_core.stat_n_jobs_in_prio_list -= n_dispatched;
+ core->stat_n_jobs_in_prio_list -= n_dispatched;
}
/**
@@ -1288,7 +1384,7 @@ static void check_prio_list(bool atomic)
*
* Reference count will be incremented for found job.
*
- * b2r2_core.lock must be held
+ * core->lock _must_ be held
*/
static struct b2r2_core_job *find_job_in_list(int job_id,
struct list_head *list)
@@ -1296,12 +1392,15 @@ static struct b2r2_core_job *find_job_in_list(int job_id,
struct list_head *ptr;
list_for_each(ptr, list) {
- struct b2r2_core_job *job =
- list_entry(ptr, struct b2r2_core_job, list);
+ struct b2r2_core_job *job = list_entry(
+ ptr, struct b2r2_core_job, list);
if (job->job_id == job_id) {
+ struct b2r2_blt_instance *instance =
+ (struct b2r2_blt_instance *) job->tag;
+ struct b2r2_core *core = instance->control->data;
/* Increase reference count, should be released by
the caller of b2r2_core_job_find */
- internal_job_addref(job, __func__);
+ internal_job_addref(core, job, __func__);
return job;
}
}
@@ -1311,23 +1410,25 @@ static struct b2r2_core_job *find_job_in_list(int job_id,
/**
* find_job_in_active_jobs() - Finds job in active job queues
*
- * @jobid: Job id to find
+ * @core: The b2r2 core entity
+ * @job_id: Job id to find
*
* Reference count will be incremented for found job.
*
- * b2r2_core.lock must be held
+ * core->lock _must_ be held
*/
-static struct b2r2_core_job *find_job_in_active_jobs(int job_id)
+static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core,
+ int job_id)
{
int i;
struct b2r2_core_job *found_job = NULL;
- if (b2r2_core.n_active_jobs) {
- for (i = 0; i < ARRAY_SIZE(b2r2_core.active_jobs); i++) {
- struct b2r2_core_job *job = b2r2_core.active_jobs[i];
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
if (job && job->job_id == job_id) {
- internal_job_addref(job, __func__);
+ internal_job_addref(core, job, __func__);
found_job = job;
break;
}
@@ -1344,19 +1445,20 @@ static struct b2r2_core_job *find_job_in_active_jobs(int job_id)
*
* Reference count will be incremented for found job.
*
- * b2r2_core.lock must be held
+ * core->lock must be held
*/
-static struct b2r2_core_job *find_tag_in_list(int tag, struct list_head *list)
+static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core,
+ int tag, struct list_head *list)
{
struct list_head *ptr;
list_for_each(ptr, list) {
struct b2r2_core_job *job =
- list_entry(ptr, struct b2r2_core_job, list);
+ list_entry(ptr, struct b2r2_core_job, list);
if (job->tag == tag) {
/* Increase reference count, should be released by
the caller of b2r2_core_job_find */
- internal_job_addref(job, __func__);
+ internal_job_addref(core, job, __func__);
return job;
}
}
@@ -1370,19 +1472,20 @@ static struct b2r2_core_job *find_tag_in_list(int tag, struct list_head *list)
*
* Reference count will be incremented for found job.
*
- * b2r2_core.lock must be held
+ * core->lock must be held
*/
-static struct b2r2_core_job *find_tag_in_active_jobs(int tag)
+static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core,
+ int tag)
{
int i;
struct b2r2_core_job *found_job = NULL;
- if (b2r2_core.n_active_jobs) {
- for (i = 0; i < ARRAY_SIZE(b2r2_core.active_jobs); i++) {
- struct b2r2_core_job *job = b2r2_core.active_jobs[i];
+ if (core->n_active_jobs) {
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) {
+ struct b2r2_core_job *job = core->active_jobs[i];
if (job && job->tag == tag) {
- internal_job_addref(job, __func__);
+ internal_job_addref(core, job, __func__);
found_job = job;
break;
}
@@ -1396,27 +1499,27 @@ static struct b2r2_core_job *find_tag_in_active_jobs(int tag)
/**
* hw_reset() - Resets B2R2 hardware
*
- * b2r2_core.lock must be held
+ * core->lock must be held
*/
-static int hw_reset(void)
+static int hw_reset(struct b2r2_core *core)
{
u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
/* Tell B2R2 to reset */
- writel(readl(&b2r2_core.hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
- &b2r2_core.hw->BLT_CTL);
- writel(0x00000000, &b2r2_core.hw->BLT_CTL);
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
+ writel(0x00000000, &core->hw->BLT_CTL);
- b2r2_log_info("wait for B2R2 to be idle..\n");
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
/** Wait for B2R2 to be idle (on a timeout rather than while loop) */
while ((uTimeOut > 0) &&
- ((readl(&b2r2_core.hw->BLT_STA1) &
+ ((readl(&core->hw->BLT_STA1) &
B2R2BLT_STA1BDISP_IDLE) == 0x0))
uTimeOut--;
if (uTimeOut == 0) {
- b2r2_log_warn(
+ b2r2_log_warn(core->dev,
"error-> after software reset B2R2 is not idle\n");
return -EAGAIN;
}
@@ -1431,114 +1534,106 @@ static int hw_reset(void)
*
* @job: Job to trigger
*
- * b2r2_core.lock must be held
+ * core->lock must be held
*/
-static void trigger_job(struct b2r2_core_job *job)
+static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job)
{
/* Debug prints */
- b2r2_log_info("queue 0x%x \n", job->queue);
- b2r2_log_info("BLT TRIG_IP 0x%x (first node)\n",
+ b2r2_log_info(core->dev, "queue 0x%x\n", job->queue);
+ b2r2_log_info(core->dev, "BLT TRIG_IP 0x%x (first node)\n",
job->first_node_address);
- b2r2_log_info("BLT LNA_CTL 0x%x (last node)\n",
+ b2r2_log_info(core->dev, "BLT LNA_CTL 0x%x (last node)\n",
job->last_node_address);
- b2r2_log_info("BLT TRIG_CTL 0x%x \n", job->control);
- b2r2_log_info("BLT PACE_CTL 0x%x \n", job->pace_control);
+ b2r2_log_info(core->dev, "BLT TRIG_CTL 0x%x\n", job->control);
+ b2r2_log_info(core->dev, "BLT PACE_CTL 0x%x\n", job->pace_control);
reset_hw_timer(job);
job->job_state = B2R2_CORE_JOB_RUNNING;
/* Enable interrupt */
- writel(readl(&b2r2_core.hw->BLT_ITM0) | job->interrupt_context,
- &b2r2_core.hw->BLT_ITM0);
-
- writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
- B2R2_PLUG_OPCODE_SIZE_64),
- &b2r2_core.hw->PLUGS1_OP2);
- writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
- &b2r2_core.hw->PLUGS1_CHZ);
- writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
- (b2r2_core.min_req_time << 16),
- &b2r2_core.hw->PLUGS1_MSZ);
- writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
- &b2r2_core.hw->PLUGS1_PGZ);
-
- writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
- B2R2_PLUG_OPCODE_SIZE_64),
- &b2r2_core.hw->PLUGS2_OP2);
- writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
- &b2r2_core.hw->PLUGS2_CHZ);
- writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
- (b2r2_core.min_req_time << 16),
- &b2r2_core.hw->PLUGS2_MSZ);
- writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
- &b2r2_core.hw->PLUGS2_PGZ);
-
- writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
- B2R2_PLUG_OPCODE_SIZE_64),
- &b2r2_core.hw->PLUGS3_OP2);
- writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
- &b2r2_core.hw->PLUGS3_CHZ);
- writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
- (b2r2_core.min_req_time << 16),
- &b2r2_core.hw->PLUGS3_MSZ);
- writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
- &b2r2_core.hw->PLUGS3_PGZ);
-
- writel(min_t(u8, max_t(u8, b2r2_core.op_size, B2R2_PLUG_OPCODE_SIZE_8),
- B2R2_PLUG_OPCODE_SIZE_64),
- &b2r2_core.hw->PLUGT_OP2);
- writel(min_t(u8, b2r2_core.ch_size, B2R2_PLUG_CHUNK_SIZE_128),
- &b2r2_core.hw->PLUGT_CHZ);
- writel(min_t(u8, b2r2_core.mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
- (b2r2_core.min_req_time << 16),
- &b2r2_core.hw->PLUGT_MSZ);
- writel(min_t(u8, b2r2_core.pg_size, B2R2_PLUG_PAGE_SIZE_256),
- &b2r2_core.hw->PLUGT_PGZ);
+ writel(readl(&core->hw->BLT_ITM0) | job->interrupt_context,
+ &core->hw->BLT_ITM0);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS1_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS1_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS1_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS1_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS2_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS2_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS2_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS2_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS3_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGS3_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGS3_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGS3_PGZ);
+
+ writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8),
+ B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGT_OP2);
+ writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128),
+ &core->hw->PLUGT_CHZ);
+ writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) |
+ (core->min_req_time << 16), &core->hw->PLUGT_MSZ);
+ writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256),
+ &core->hw->PLUGT_PGZ);
/* B2R2 kicks off when LNA is written, LNA write must be last! */
switch (job->queue) {
case B2R2_CORE_QUEUE_CQ1:
- writel(job->first_node_address, &b2r2_core.hw->BLT_CQ1_TRIG_IP);
- writel(job->control, &b2r2_core.hw->BLT_CQ1_TRIG_CTL);
- writel(job->pace_control, &b2r2_core.hw->BLT_CQ1_PACE_CTL);
+ writel(job->first_node_address, &core->hw->BLT_CQ1_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ1_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ1_PACE_CTL);
break;
case B2R2_CORE_QUEUE_CQ2:
- writel(job->first_node_address, &b2r2_core.hw->BLT_CQ2_TRIG_IP);
- writel(job->control, &b2r2_core.hw->BLT_CQ2_TRIG_CTL);
- writel(job->pace_control, &b2r2_core.hw->BLT_CQ2_PACE_CTL);
+ writel(job->first_node_address, &core->hw->BLT_CQ2_TRIG_IP);
+ writel(job->control, &core->hw->BLT_CQ2_TRIG_CTL);
+ writel(job->pace_control, &core->hw->BLT_CQ2_PACE_CTL);
break;
case B2R2_CORE_QUEUE_AQ1:
- writel(job->control, &b2r2_core.hw->BLT_AQ1_CTL);
- writel(job->first_node_address, &b2r2_core.hw->BLT_AQ1_IP);
+ writel(job->control, &core->hw->BLT_AQ1_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ1_IP);
wmb();
start_hw_timer(job);
- writel(job->last_node_address, &b2r2_core.hw->BLT_AQ1_LNA);
+ writel(job->last_node_address, &core->hw->BLT_AQ1_LNA);
break;
case B2R2_CORE_QUEUE_AQ2:
- writel(job->control, &b2r2_core.hw->BLT_AQ2_CTL);
- writel(job->first_node_address, &b2r2_core.hw->BLT_AQ2_IP);
+ writel(job->control, &core->hw->BLT_AQ2_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ2_IP);
wmb();
start_hw_timer(job);
- writel(job->last_node_address, &b2r2_core.hw->BLT_AQ2_LNA);
+ writel(job->last_node_address, &core->hw->BLT_AQ2_LNA);
break;
case B2R2_CORE_QUEUE_AQ3:
- writel(job->control, &b2r2_core.hw->BLT_AQ3_CTL);
- writel(job->first_node_address, &b2r2_core.hw->BLT_AQ3_IP);
+ writel(job->control, &core->hw->BLT_AQ3_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ3_IP);
wmb();
start_hw_timer(job);
- writel(job->last_node_address, &b2r2_core.hw->BLT_AQ3_LNA);
+ writel(job->last_node_address, &core->hw->BLT_AQ3_LNA);
break;
case B2R2_CORE_QUEUE_AQ4:
- writel(job->control, &b2r2_core.hw->BLT_AQ4_CTL);
- writel(job->first_node_address, &b2r2_core.hw->BLT_AQ4_IP);
+ writel(job->control, &core->hw->BLT_AQ4_CTL);
+ writel(job->first_node_address, &core->hw->BLT_AQ4_IP);
wmb();
start_hw_timer(job);
- writel(job->last_node_address, &b2r2_core.hw->BLT_AQ4_LNA);
+ writel(job->last_node_address, &core->hw->BLT_AQ4_LNA);
break;
/** Handle the default case */
@@ -1554,31 +1649,32 @@ static void trigger_job(struct b2r2_core_job *job)
*
* @queue: Queue to handle event for
*
- * b2r2_core.lock must be held
+ * core->lock must be held
*/
-static void handle_queue_event(enum b2r2_core_queue queue)
+static void handle_queue_event(struct b2r2_core *core,
+ enum b2r2_core_queue queue)
{
struct b2r2_core_job *job;
- job = b2r2_core.active_jobs[queue];
+ job = core->active_jobs[queue];
if (job) {
if (job->job_state != B2R2_CORE_JOB_RUNNING)
/* Should be running
Severe error. TBD */
- b2r2_log_warn(
+ b2r2_log_warn(core->dev,
"%s: Job is not running", __func__);
- stop_hw_timer(job);
+ stop_hw_timer(core, job);
/* Remove from queue */
- BUG_ON(b2r2_core.n_active_jobs == 0);
- b2r2_core.active_jobs[queue] = NULL;
- b2r2_core.n_active_jobs--;
+ BUG_ON(core->n_active_jobs == 0);
+ core->active_jobs[queue] = NULL;
+ core->n_active_jobs--;
}
if (!job) {
/* No job, error? */
- b2r2_log_warn("%s: No job", __func__);
+ b2r2_log_warn(core->dev, "%s: No job", __func__);
return;
}
@@ -1595,7 +1691,7 @@ static void handle_queue_event(enum b2r2_core_queue queue)
wake_up_interruptible(&job->event);
/* Dispatch to work queue to handle callbacks */
- queue_work(b2r2_core.work_queue, &job->work);
+ queue_work(core->work_queue, &job->work);
}
/**
@@ -1603,62 +1699,62 @@ static void handle_queue_event(enum b2r2_core_queue queue)
*
* @status: Contents of the B2R2 ITS register
*/
-static void process_events(u32 status)
+static void process_events(struct b2r2_core *core, u32 status)
{
u32 mask = 0xF;
u32 disable_itm_mask = 0;
- b2r2_log_info("Enters process_events \n");
- b2r2_log_info("status 0x%x \n", status);
+ b2r2_log_info(core->dev, "Enters process_events\n");
+ b2r2_log_info(core->dev, "status 0x%x\n", status);
/* Composition queue 1 */
if (status & mask) {
- handle_queue_event(B2R2_CORE_QUEUE_CQ1);
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ1);
disable_itm_mask |= mask;
}
mask <<= 4;
/* Composition queue 2 */
if (status & mask) {
- handle_queue_event(B2R2_CORE_QUEUE_CQ2);
+ handle_queue_event(core, B2R2_CORE_QUEUE_CQ2);
disable_itm_mask |= mask;
}
mask <<= 8;
/* Application queue 1 */
if (status & mask) {
- handle_queue_event(B2R2_CORE_QUEUE_AQ1);
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ1);
disable_itm_mask |= mask;
}
mask <<= 4;
/* Application queue 2 */
if (status & mask) {
- handle_queue_event(B2R2_CORE_QUEUE_AQ2);
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ2);
disable_itm_mask |= mask;
}
mask <<= 4;
/* Application queue 3 */
if (status & mask) {
- handle_queue_event(B2R2_CORE_QUEUE_AQ3);
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ3);
disable_itm_mask |= mask;
}
mask <<= 4;
/* Application queue 4 */
if (status & mask) {
- handle_queue_event(B2R2_CORE_QUEUE_AQ4);
+ handle_queue_event(core, B2R2_CORE_QUEUE_AQ4);
disable_itm_mask |= mask;
}
/* Clear received interrupt flags */
- writel(status, &b2r2_core.hw->BLT_ITS);
+ writel(status, &core->hw->BLT_ITS);
/* Disable handled interrupts */
- writel(readl(&b2r2_core.hw->BLT_ITM0) & ~disable_itm_mask,
- &b2r2_core.hw->BLT_ITM0);
+ writel(readl(&core->hw->BLT_ITM0) & ~disable_itm_mask,
+ &core->hw->BLT_ITM0);
- b2r2_log_info("Returns process_events \n");
+ b2r2_log_info(core->dev, "Returns process_events\n");
}
/**
@@ -1670,23 +1766,24 @@ static void process_events(u32 status)
static irqreturn_t b2r2_irq_handler(int irq, void *x)
{
unsigned long flags;
+	struct b2r2_core *core = (struct b2r2_core *) x;
/* Spin lock is need in irq handler (SMP) */
- spin_lock_irqsave(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
/* Make sure that we have a clock */
/* Remember time for last irq (for timeout mgmt) */
- b2r2_core.jiffies_last_irq = jiffies;
- b2r2_core.stat_n_irq++;
+ core->jiffies_last_irq = jiffies;
+ core->stat_n_irq++;
/* Handle the interrupt(s) */
- process_events(readl(&b2r2_core.hw->BLT_ITS));
+ process_events(core, readl(&core->hw->BLT_ITS));
/* Check if we can dispatch new jobs */
- check_prio_list(true);
+ check_prio_list(core, true);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_unlock_irqrestore(&core->lock, flags);
return IRQ_HANDLED;
}
@@ -1902,18 +1999,18 @@ static const struct debugfs_reg debugfs_regs[] = {
/**
* printk_regs() - Print B2R2 registers to printk
*/
-static void printk_regs(void)
+static void printk_regs(struct b2r2_core *core)
{
#ifdef CONFIG_B2R2_DEBUG
int i;
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
unsigned long value = readl(
- (unsigned long *) (((u8 *) b2r2_core.hw) +
- debugfs_regs[i].offset));
- b2r2_log_regdump("%s: %08lX\n",
- debugfs_regs[i].name,
- value);
+ (unsigned long *) (((u8 *) core->hw) +
+ debugfs_regs[i].offset));
+ b2r2_log_regdump(core->dev, "%s: %08lX\n",
+ debugfs_regs[i].name,
+ value);
}
#endif
}
@@ -1934,7 +2031,6 @@ static int debugfs_b2r2_reg_read(struct file *filp, char __user *buf,
{
size_t dev_size;
int ret = 0;
-
unsigned long value;
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
@@ -1944,9 +2040,8 @@ static int debugfs_b2r2_reg_read(struct file *filp, char __user *buf,
}
/* Read from B2R2 */
- value = readl((unsigned long *) (((u8 *) b2r2_core.hw) +
- (u32) filp->f_dentry->
- d_inode->i_private));
+ value = readl((unsigned long *)
+ filp->f_dentry->d_inode->i_private);
/* Build the string */
dev_size = sprintf(Buf, "%8lX\n", value);
@@ -1998,8 +2093,8 @@ static int debugfs_b2r2_reg_write(struct file *filp, const char __user *buf,
if (sscanf(Buf, "%8lX", (unsigned long *) &reg_value) != 1)
return -EINVAL;
- writel(reg_value, (u32 *) (((u8 *) b2r2_core.hw) +
- (u32) filp->f_dentry->d_inode->i_private));
+ writel(reg_value, (u32 *)
+ filp->f_dentry->d_inode->i_private);
*f_pos += count;
ret = count;
@@ -2042,11 +2137,12 @@ static int debugfs_b2r2_regs_read(struct file *filp, char __user *buf,
/* Build a giant string containing all registers */
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
unsigned long value =
- readl((unsigned long *) (((u8 *) b2r2_core.hw) +
- debugfs_regs[i].offset));
+ readl((u32 *) (((u8 *)
+ filp->f_dentry->d_inode->i_private) +
+ debugfs_regs[i].offset));
dev_size += sprintf(Buf + dev_size, "%s: %08lX\n",
- debugfs_regs[i].name,
- value);
+ debugfs_regs[i].name,
+ value);
}
/* No more to read if offset != 0 */
@@ -2092,6 +2188,7 @@ static int debugfs_b2r2_stat_read(struct file *filp, char __user *buf,
int ret = 0;
int i = 0;
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
if (Buf == NULL) {
ret = -ENOMEM;
@@ -2099,21 +2196,22 @@ static int debugfs_b2r2_stat_read(struct file *filp, char __user *buf,
}
/* Build a string containing all statistics */
- dev_size += sprintf(Buf + dev_size, "Interrupts: %lu\n",
- b2r2_core.stat_n_irq);
- dev_size += sprintf(Buf + dev_size, "Added jobs: %lu\n",
- b2r2_core.stat_n_jobs_added);
- dev_size += sprintf(Buf + dev_size, "Removed jobs: %lu\n",
- b2r2_core.stat_n_jobs_removed);
- dev_size += sprintf(Buf + dev_size, "Jobs in prio list: %lu\n",
- b2r2_core.stat_n_jobs_in_prio_list);
- dev_size += sprintf(Buf + dev_size, "Active jobs: %lu\n",
- b2r2_core.n_active_jobs);
- for (i = 0; i < ARRAY_SIZE(b2r2_core.active_jobs); i++)
- dev_size += sprintf(Buf + dev_size, "Job in queue %d: %p\n",
- i, b2r2_core.active_jobs[i]);
- dev_size += sprintf(Buf + dev_size, "Clock requests: %lu\n",
- b2r2_core.clock_request_count);
+ dev_size += sprintf(Buf + dev_size, "Interrupts : %lu\n",
+ core->stat_n_irq);
+ dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n",
+ core->stat_n_jobs_added);
+ dev_size += sprintf(Buf + dev_size, "Removed jobs : %lu\n",
+ core->stat_n_jobs_removed);
+ dev_size += sprintf(Buf + dev_size, "Jobs in prio list : %lu\n",
+ core->stat_n_jobs_in_prio_list);
+ dev_size += sprintf(Buf + dev_size, "Active jobs : %lu\n",
+ core->n_active_jobs);
+ for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++)
+ dev_size += sprintf(Buf + dev_size,
+ " Job in queue %d : 0x%08lx\n",
+ i, (unsigned long) core->active_jobs[i]);
+ dev_size += sprintf(Buf + dev_size, "Clock requests : %lu\n",
+ core->clock_request_count);
/* No more to read if offset != 0 */
if (*f_pos > dev_size)
@@ -2159,10 +2257,11 @@ static int debugfs_b2r2_clock_read(struct file *filp, char __user *buf,
char Buf[10+2];
size_t dev_size;
int ret = 0;
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
- unsigned long value = clk_get_rate(b2r2_core.b2r2_clock);
+ unsigned long value = clk_get_rate(core->b2r2_clock);
- dev_size = sprintf(Buf, "%#010lX\n", value);
+ dev_size = sprintf(Buf, "%#010lx\n", value);
/* No more to read if offset != 0 */
if (*f_pos > dev_size)
@@ -2246,40 +2345,40 @@ static const struct file_operations debugfs_b2r2_clock_fops = {
* 6)Recover from any error without any leaks.
*
*/
-static int init_hw(void)
+static int init_hw(struct b2r2_core *core)
{
int result = 0;
u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
/* Put B2R2 into reset */
- clear_interrupts();
-
- writel(readl(&b2r2_core.hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
- &b2r2_core.hw->BLT_CTL);
+ clear_interrupts(core);
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
/* Set up interrupt handler */
- result = request_irq(b2r2_core.irq, b2r2_irq_handler, 0,
- "b2r2-interrupt", 0);
+ result = request_irq(core->irq, b2r2_irq_handler, 0,
+ "b2r2-interrupt", core);
if (result) {
- b2r2_log_err("%s: failed to register IRQ for B2R2\n", __func__);
+ b2r2_log_err(core->dev,
+ "%s: failed to register IRQ for B2R2\n", __func__);
goto b2r2_init_request_irq_failed;
}
- b2r2_log_info("do a global reset..\n");
+ b2r2_log_info(core->dev, "do a global reset..\n");
/* Release reset */
- writel(0x00000000, &b2r2_core.hw->BLT_CTL);
+ writel(0x00000000, &core->hw->BLT_CTL);
- b2r2_log_info("wait for B2R2 to be idle..\n");
+ b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n");
/** Wait for B2R2 to be idle (on a timeout rather than while loop) */
while ((uTimeOut > 0) &&
- ((readl(&b2r2_core.hw->BLT_STA1) &
+ ((readl(&core->hw->BLT_STA1) &
B2R2BLT_STA1BDISP_IDLE) == 0x0))
uTimeOut--;
if (uTimeOut == 0) {
- b2r2_log_err(
+ b2r2_log_err(core->dev,
"%s: B2R2 not idle after SW reset\n", __func__);
result = -EAGAIN;
goto b2r2_core_init_hw_timeout;
@@ -2287,85 +2386,81 @@ static int init_hw(void)
#ifdef CONFIG_DEBUG_FS
/* Register debug fs files for register access */
- if (b2r2_core.debugfs_root_dir && !b2r2_core.debugfs_regs_dir) {
+ if (core->debugfs_core_root_dir && !core->debugfs_regs_dir) {
int i;
- b2r2_core.debugfs_regs_dir = debugfs_create_dir("regs",
- b2r2_core.debugfs_root_dir);
- debugfs_create_file("all", 0666, b2r2_core.debugfs_regs_dir,
- 0, &debugfs_b2r2_regs_fops);
+ core->debugfs_regs_dir = debugfs_create_dir("regs",
+ core->debugfs_core_root_dir);
+ debugfs_create_file("all", 0666, core->debugfs_regs_dir,
+ (void *)core->hw, &debugfs_b2r2_regs_fops);
/* Create debugfs entries for all static registers */
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++)
debugfs_create_file(debugfs_regs[i].name, 0666,
- b2r2_core.debugfs_regs_dir,
- (void *) debugfs_regs[i].offset,
+ core->debugfs_regs_dir,
+ (void *)(((u8 *) core->hw) +
+ debugfs_regs[i].offset),
&debugfs_b2r2_reg_fops);
}
#endif
- b2r2_log_info("%s ended..\n", __func__);
+ b2r2_log_info(core->dev, "%s ended..\n", __func__);
return result;
/** Recover from any error without any leaks */
-
b2r2_core_init_hw_timeout:
-
/** Free B2R2 interrupt handler */
-
- free_irq(b2r2_core.irq, 0);
+ free_irq(core->irq, core);
b2r2_init_request_irq_failed:
-
- if (b2r2_core.hw)
- iounmap(b2r2_core.hw);
- b2r2_core.hw = NULL;
+ if (core->hw)
+ iounmap(core->hw);
+ core->hw = NULL;
return result;
-
}
/**
* exit_hw() - B2R2 Hardware exit
*
- * b2r2_core.lock _must_ NOT be held
+ * core->lock _must_ NOT be held
*/
-static void exit_hw(void)
+static void exit_hw(struct b2r2_core *core)
{
unsigned long flags;
- b2r2_log_info("%s started..\n", __func__);
+ b2r2_log_info(core->dev, "%s started..\n", __func__);
#ifdef CONFIG_DEBUG_FS
/* Unregister our debugfs entries */
- if (b2r2_core.debugfs_regs_dir) {
- debugfs_remove_recursive(b2r2_core.debugfs_regs_dir);
- b2r2_core.debugfs_regs_dir = NULL;
+ if (core->debugfs_regs_dir) {
+ debugfs_remove_recursive(core->debugfs_regs_dir);
+ core->debugfs_regs_dir = NULL;
}
#endif
- b2r2_log_debug("%s: locking b2r2_core.lock\n", __func__);
- spin_lock_irqsave(&b2r2_core.lock, flags);
+ b2r2_log_debug(core->dev, "%s: locking core->lock\n", __func__);
+ spin_lock_irqsave(&core->lock, flags);
/* Cancel all pending jobs */
- b2r2_log_debug("%s: canceling pending jobs\n", __func__);
- exit_job_list(&b2r2_core.prio_queue);
+ b2r2_log_debug(core->dev, "%s: canceling pending jobs\n", __func__);
+ exit_job_list(core, &core->prio_queue);
/* Soft reset B2R2 (Close all DMA,
reset all state to idle, reset regs)*/
- b2r2_log_debug("%s: putting b2r2 in reset\n", __func__);
- writel(readl(&b2r2_core.hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
- &b2r2_core.hw->BLT_CTL);
+ b2r2_log_debug(core->dev, "%s: putting b2r2 in reset\n", __func__);
+ writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
+ &core->hw->BLT_CTL);
- b2r2_log_debug("%s: clearing interrupts\n", __func__);
- clear_interrupts();
+ b2r2_log_debug(core->dev, "%s: clearing interrupts\n", __func__);
+ clear_interrupts(core);
/** Free B2R2 interrupt handler */
- b2r2_log_debug("%s: freeing interrupt handler\n", __func__);
- free_irq(b2r2_core.irq, 0);
+ b2r2_log_debug(core->dev, "%s: freeing interrupt handler\n", __func__);
+ free_irq(core->irq, core);
- b2r2_log_debug("%s: unlocking b2r2_core.lock\n", __func__);
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ b2r2_log_debug(core->dev, "%s: unlocking core->lock\n", __func__);
+ spin_unlock_irqrestore(&core->lock, flags);
- b2r2_log_info("%s ended...\n", __func__);
+ b2r2_log_info(core->dev, "%s ended...\n", __func__);
}
/**
@@ -2377,58 +2472,68 @@ static int b2r2_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
+ struct b2r2_core *core;
+ struct b2r2_control *control;
BUG_ON(pdev == NULL);
+ BUG_ON(pdev->id < 0 || pdev->id >= B2R2_MAX_NBR_DEVICES);
- ret = b2r2_debug_init(&pdev->dev);
- if (ret < 0) {
- dev_err(b2r2_core.log_dev, "b2r2_debug_init failed\n");
- goto b2r2_probe_debug_init_failed;
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core) {
+ dev_err(&pdev->dev, "b2r2 core alloc failed\n");
+		ret = -ENOMEM;
+ goto b2r2_probe_core_alloc_fail;
}
- b2r2_core.log_dev = &pdev->dev;
- b2r2_log_info("init started.\n");
+ core->dev = &pdev->dev;
+ dev_set_drvdata(core->dev, core);
+ if (pdev->id)
+ snprintf(core->name, sizeof(core->name), "b2r2_%d", pdev->id);
+ else
+ snprintf(core->name, sizeof(core->name), "b2r2");
+
+ dev_info(&pdev->dev, "init started.\n");
/* Init spin locks */
- spin_lock_init(&b2r2_core.lock);
+ spin_lock_init(&core->lock);
/* Init job queues */
- INIT_LIST_HEAD(&b2r2_core.prio_queue);
+ INIT_LIST_HEAD(&core->prio_queue);
#ifdef HANDLE_TIMEOUTED_JOBS
/* Create work queue for callbacks & timeout */
- INIT_DELAYED_WORK(&b2r2_core.timeout_work, timeout_work_function);
+ INIT_DELAYED_WORK(&core->timeout_work, timeout_work_function);
#endif
/* Work queue for callbacks and timeout management */
- b2r2_core.work_queue = create_workqueue("B2R2");
- if (!b2r2_core.work_queue) {
+ core->work_queue = create_workqueue("B2R2");
+ if (!core->work_queue) {
ret = -ENOMEM;
goto b2r2_probe_no_work_queue;
}
/* Get the clock for B2R2 */
- b2r2_core.b2r2_clock = clk_get(&pdev->dev, "b2r2");
- if (IS_ERR(b2r2_core.b2r2_clock)) {
- ret = PTR_ERR(b2r2_core.b2r2_clock);
- b2r2_log_err("clk_get b2r2 failed\n");
+ core->b2r2_clock = clk_get(core->dev, "b2r2");
+ if (IS_ERR(core->b2r2_clock)) {
+ ret = PTR_ERR(core->b2r2_clock);
+ dev_err(&pdev->dev, "clk_get b2r2 failed\n");
goto b2r2_probe_no_clk;
}
/* Get the B2R2 regulator */
- b2r2_core.b2r2_reg = regulator_get(&pdev->dev, "vsupply");
- if (IS_ERR(b2r2_core.b2r2_reg)) {
- ret = PTR_ERR(b2r2_core.b2r2_reg);
- b2r2_log_err("regulator_get vsupply failed (dev_name=%s)\n",
- dev_name(&pdev->dev));
+ core->b2r2_reg = regulator_get(core->dev, "vsupply");
+ if (IS_ERR(core->b2r2_reg)) {
+ ret = PTR_ERR(core->b2r2_reg);
+ dev_err(&pdev->dev, "regulator_get vsupply failed "
+ "(dev_name=%s)\n", dev_name(core->dev));
goto b2r2_probe_no_reg;
}
/* Init power management */
- mutex_init(&b2r2_core.domain_lock);
- INIT_DELAYED_WORK_DEFERRABLE(&b2r2_core.domain_disable_work,
+ mutex_init(&core->domain_lock);
+ INIT_DELAYED_WORK_DEFERRABLE(&core->domain_disable_work,
domain_disable_work_function);
- b2r2_core.domain_enabled = false;
+ core->domain_enabled = false;
/* Map B2R2 into kernel virtual memory space */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2436,81 +2541,113 @@ static int b2r2_probe(struct platform_device *pdev)
goto b2r2_probe_no_res;
/* Hook up irq */
- b2r2_core.irq = platform_get_irq(pdev, 0);
- if (b2r2_core.irq <= 0) {
- b2r2_log_info("%s: Failed to request irq (irq=%d)\n", __func__,
- b2r2_core.irq);
+ core->irq = platform_get_irq(pdev, 0);
+ if (core->irq <= 0) {
+ dev_err(&pdev->dev, "%s: Failed to request irq (irq=%d)\n",
+ __func__, core->irq);
goto b2r2_failed_irq_get;
}
- b2r2_core.hw = (struct b2r2_memory_map *) ioremap(res->start,
+ core->hw = (struct b2r2_memory_map *) ioremap(res->start,
res->end - res->start + 1);
- if (b2r2_core.hw == NULL) {
-
- b2r2_log_info("%s: ioremap failed\n", __func__);
+ if (core->hw == NULL) {
+ dev_err(&pdev->dev, "%s: ioremap failed\n", __func__);
ret = -ENOMEM;
goto b2r2_probe_ioremap_failed;
}
- dev_dbg(b2r2_core.log_dev,
- "b2r2 structure address %p\n",
- b2r2_core.hw);
+ dev_dbg(core->dev, "b2r2 structure address %p\n", core->hw);
- /* Initialize b2r2_blt module. FIXME: Module of it's own
- or perhaps a dedicated module init c file? */
- ret = b2r2_blt_module_init();
- if (ret < 0) {
- b2r2_log_err("b2r2_blt_module_init failed\n");
- goto b2r2_probe_blt_init_fail;
+ control = kzalloc(sizeof(*control), GFP_KERNEL);
+ if (!control) {
+ dev_err(&pdev->dev, "b2r2 control alloc failed\n");
+		ret = -ENOMEM;
+ goto b2r2_probe_control_alloc_fail;
}
- b2r2_core.op_size = B2R2_PLUG_OPCODE_SIZE_DEFAULT;
- b2r2_core.ch_size = B2R2_PLUG_CHUNK_SIZE_DEFAULT;
- b2r2_core.pg_size = B2R2_PLUG_PAGE_SIZE_DEFAULT;
- b2r2_core.mg_size = B2R2_PLUG_MESSAGE_SIZE_DEFAULT;
- b2r2_core.min_req_time = 0;
+ control->miscdev.parent = core->dev;
+ control->data = (void *)core;
+ control->id = pdev->id;
+ control->dev = &pdev->dev; /* Temporary device */
+ snprintf(control->name, sizeof(control->name), "%s_blt", core->name);
+
+ core->op_size = B2R2_PLUG_OPCODE_SIZE_DEFAULT;
+ core->ch_size = B2R2_PLUG_CHUNK_SIZE_DEFAULT;
+ core->pg_size = B2R2_PLUG_PAGE_SIZE_DEFAULT;
+ core->mg_size = B2R2_PLUG_MESSAGE_SIZE_DEFAULT;
+ core->min_req_time = 0;
#ifdef CONFIG_DEBUG_FS
- b2r2_core.debugfs_root_dir = debugfs_create_dir("b2r2", NULL);
- debugfs_create_file("stat", 0666, b2r2_core.debugfs_root_dir,
- 0, &debugfs_b2r2_stat_fops);
- debugfs_create_file("clock", 0666, b2r2_core.debugfs_root_dir,
- 0, &debugfs_b2r2_clock_fops);
-
- debugfs_create_u8("op_size", 0666, b2r2_core.debugfs_root_dir,
- &b2r2_core.op_size);
- debugfs_create_u8("ch_size", 0666, b2r2_core.debugfs_root_dir,
- &b2r2_core.ch_size);
- debugfs_create_u8("pg_size", 0666, b2r2_core.debugfs_root_dir,
- &b2r2_core.pg_size);
- debugfs_create_u8("mg_size", 0666, b2r2_core.debugfs_root_dir,
- &b2r2_core.mg_size);
- debugfs_create_u16("min_req_time", 0666, b2r2_core.debugfs_root_dir,
- &b2r2_core.min_req_time);
+ core->debugfs_root_dir = debugfs_create_dir(core->name, NULL);
+ core->debugfs_core_root_dir = debugfs_create_dir("core",
+ core->debugfs_root_dir);
+ debugfs_create_file("stats", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_stat_fops);
+ debugfs_create_file("clock", 0666, core->debugfs_core_root_dir,
+ core, &debugfs_b2r2_clock_fops);
+ debugfs_create_u8("op_size", 0666, core->debugfs_core_root_dir,
+ &core->op_size);
+ debugfs_create_u8("ch_size", 0666, core->debugfs_core_root_dir,
+ &core->ch_size);
+ debugfs_create_u8("pg_size", 0666, core->debugfs_core_root_dir,
+ &core->pg_size);
+ debugfs_create_u8("mg_size", 0666, core->debugfs_core_root_dir,
+ &core->mg_size);
+ debugfs_create_u16("min_req_time", 0666, core->debugfs_core_root_dir,
+ &core->min_req_time);
+
+ control->debugfs_debug_root_dir = debugfs_create_dir("debug",
+ core->debugfs_root_dir);
+ control->mem_heap.debugfs_root_dir = debugfs_create_dir("mem",
+ core->debugfs_root_dir);
+ control->debugfs_root_dir = debugfs_create_dir("blt",
+ core->debugfs_root_dir);
#endif
- b2r2_log_info("init done.\n");
+ ret = b2r2_debug_init(control);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "b2r2_debug_init failed\n");
+ goto b2r2_probe_debug_init_failed;
+ }
+
+ /* Initialize b2r2_blt module. FIXME: Module of it's own
+ or perhaps a dedicated module init c file? */
+ ret = b2r2_blt_module_init(control);
+ if (ret < 0) {
+ b2r2_log_err(&pdev->dev, "b2r2_blt_module_init failed\n");
+ goto b2r2_probe_blt_init_fail;
+ }
+
+ core->control = control;
+ b2r2_core[pdev->id] = core;
+ dev_info(&pdev->dev, "init done.\n");
return ret;
/** Recover from any error if something fails */
b2r2_probe_blt_init_fail:
+ kfree(control);
+b2r2_probe_control_alloc_fail:
b2r2_probe_ioremap_failed:
b2r2_failed_irq_get:
b2r2_probe_no_res:
- regulator_put(b2r2_core.b2r2_reg);
+ regulator_put(core->b2r2_reg);
b2r2_probe_no_reg:
- clk_put(b2r2_core.b2r2_clock);
+ clk_put(core->b2r2_clock);
b2r2_probe_no_clk:
- destroy_workqueue(b2r2_core.work_queue);
- b2r2_core.work_queue = NULL;
+ destroy_workqueue(core->work_queue);
+ core->work_queue = NULL;
b2r2_probe_no_work_queue:
-
- b2r2_log_info("init done with errors.\n");
+ b2r2_debug_exit();
b2r2_probe_debug_init_failed:
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(core->debugfs_root_dir);
+#endif
+ kfree(core);
+b2r2_probe_core_alloc_fail:
+ dev_info(&pdev->dev, "init done with errors.\n");
return ret;
-
}
@@ -2523,83 +2660,94 @@ b2r2_probe_debug_init_failed:
static int b2r2_remove(struct platform_device *pdev)
{
unsigned long flags;
+ struct b2r2_core *core;
BUG_ON(pdev == NULL);
- b2r2_log_info("%s started\n", __func__);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(&pdev->dev, "%s: Started\n", __func__);
#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(b2r2_core.debugfs_root_dir);
+ debugfs_remove_recursive(core->debugfs_root_dir);
#endif
/* Flush B2R2 work queue (call all callbacks) */
- flush_workqueue(b2r2_core.work_queue);
+ flush_workqueue(core->work_queue);
/* Exit b2r2 blt module */
- b2r2_blt_module_exit();
+ b2r2_blt_module_exit(core->control);
+
+ kfree(core->control);
#ifdef HANDLE_TIMEOUTED_JOBS
- cancel_delayed_work(&b2r2_core.timeout_work);
+ cancel_delayed_work(&core->timeout_work);
#endif
/* Flush B2R2 work queue (call all callbacks for
cancelled jobs) */
- flush_workqueue(b2r2_core.work_queue);
+ flush_workqueue(core->work_queue);
/* Make sure the power is turned off */
- cancel_delayed_work_sync(&b2r2_core.domain_disable_work);
+ cancel_delayed_work_sync(&core->domain_disable_work);
/** Unmap B2R2 registers */
- b2r2_log_info("unmap b2r2 registers..\n");
- if (b2r2_core.hw) {
- iounmap(b2r2_core.hw);
-
- b2r2_core.hw = NULL;
+ b2r2_log_info(&pdev->dev, "unmap b2r2 registers..\n");
+ if (core->hw) {
+ iounmap(core->hw);
+ core->hw = NULL;
}
- destroy_workqueue(b2r2_core.work_queue);
+ destroy_workqueue(core->work_queue);
- spin_lock_irqsave(&b2r2_core.lock, flags);
- b2r2_core.work_queue = NULL;
- spin_unlock_irqrestore(&b2r2_core.lock, flags);
+ spin_lock_irqsave(&core->lock, flags);
+ core->work_queue = NULL;
+ spin_unlock_irqrestore(&core->lock, flags);
/* Return the clock */
- clk_put(b2r2_core.b2r2_clock);
- regulator_put(b2r2_core.b2r2_reg);
+ clk_put(core->b2r2_clock);
+ regulator_put(core->b2r2_reg);
- b2r2_log_info("%s ended\n", __func__);
-
- b2r2_core.log_dev = NULL;
+ core->dev = NULL;
+ kfree(core);
+ b2r2_core[pdev->id] = NULL;
b2r2_debug_exit();
- return 0;
+ b2r2_log_info(&pdev->dev, "%s: Ended\n", __func__);
+ return 0;
}
/**
* b2r2_suspend() - This routine puts the B2R2 device into suspend state.
* @pdev: platform device.
*
- * This routine stores the current state of the b2r2 device and puts in to suspend state.
+ * This routine stores the current state of the b2r2 device and puts it into
+ * suspend state.
*
*/
int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
{
- b2r2_log_info("%s\n", __func__);
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
/* Flush B2R2 work queue (call all callbacks) */
- flush_workqueue(b2r2_core.work_queue);
+ flush_workqueue(core->work_queue);
#ifdef HANDLE_TIMEOUTED_JOBS
- cancel_delayed_work(&b2r2_core.timeout_work);
+ cancel_delayed_work(&core->timeout_work);
#endif
/* Flush B2R2 work queue (call all callbacks for
cancelled jobs) */
- flush_workqueue(b2r2_core.work_queue);
+ flush_workqueue(core->work_queue);
/* Make sure power is turned off */
- cancel_delayed_work_sync(&b2r2_core.domain_disable_work);
+ cancel_delayed_work_sync(&core->domain_disable_work);
return 0;
}
@@ -2614,7 +2762,12 @@ int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
*/
int b2r2_resume(struct platform_device *pdev)
{
- b2r2_log_info("%s\n", __func__);
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
return 0;
}
diff --git a/drivers/video/b2r2/b2r2_core.h b/drivers/video/b2r2/b2r2_core.h
index 2f958751694..991dd9d9d1b 100644
--- a/drivers/video/b2r2/b2r2_core.h
+++ b/drivers/video/b2r2/b2r2_core.h
@@ -18,126 +18,6 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
-/**
- * enum b2r2_core_queue - Indicates the B2R2 queue that the job belongs to
- *
- * @B2R2_CORE_QUEUE_AQ1: Application queue 1
- * @B2R2_CORE_QUEUE_AQ2: Application queue 2
- * @B2R2_CORE_QUEUE_AQ3: Application queue 3
- * @B2R2_CORE_QUEUE_AQ4: Application queue 4
- * @B2R2_CORE_QUEUE_CQ1: Composition queue 1
- * @B2R2_CORE_QUEUE_CQ2: Composition queue 2
- * @B2R2_CORE_QUEUE_NO_OF: Number of queues
- */
-enum b2r2_core_queue {
- B2R2_CORE_QUEUE_AQ1 = 0,
- B2R2_CORE_QUEUE_AQ2,
- B2R2_CORE_QUEUE_AQ3,
- B2R2_CORE_QUEUE_AQ4,
- B2R2_CORE_QUEUE_CQ1,
- B2R2_CORE_QUEUE_CQ2,
- B2R2_CORE_QUEUE_NO_OF,
-};
-
-#define B2R2_NUM_APPLICATIONS_QUEUES 4
-
-/**
- * enum b2r2_core_job_state - Indicates the current state of the job
- *
- * @B2R2_CORE_JOB_IDLE: Never queued
- * @B2R2_CORE_JOB_QUEUED: In queue but not started yet
- * @B2R2_CORE_JOB_RUNNING: Running, executed by B2R2
- * @B2R2_CORE_JOB_DONE: Completed
- * @B2R2_CORE_JOB_CANCELED: Canceled
- */
-enum b2r2_core_job_state {
- B2R2_CORE_JOB_IDLE = 0,
- B2R2_CORE_JOB_QUEUED,
- B2R2_CORE_JOB_RUNNING,
- B2R2_CORE_JOB_DONE,
- B2R2_CORE_JOB_CANCELED,
-};
-
-/**
- * struct b2r2_core_job - Represents a B2R2 core job
- *
- * @start_sentinel: Memory overwrite guard
- *
- * @tag: Client value. Used by b2r2_core_job_find_first_with_tag().
- * @prio: Job priority, from -19 up to 20. Mapped to the
- * B2R2 application queues. Filled in by the client.
- * @first_node_address: Physical address of the first node. Filled
- * in by the client.
- * @last_node_address: Physical address of the last node. Filled
- * in by the client.
- *
- * @callback: Function that will be called when the job is done.
- * @acquire_resources: Function that allocates the resources needed
- * to execute the job (i.e. SRAM alloc). Must not
- * sleep if atomic, should fail with negative error code
- * if resources not available.
- * @release_resources: Function that releases the resources previously
- * allocated by acquire_resources (i.e. SRAM alloc).
- * @release: Function that will be called when the reference count reaches
- * zero.
- *
- * @job_id: Unique id for this job, assigned by B2R2 core
- * @job_state: The current state of the job
- * @jiffies: Number of jiffies needed for this request
- *
- * @list: List entry element for internal list management
- * @event: Wait queue event to wait for job done
- * @work: Work queue structure, for callback implementation
- *
- * @queue: The queue that this job shall be submitted to
- * @control: B2R2 Queue control
- * @pace_control: For composition queue only
- * @interrupt_context: Context for interrupt
- *
- * @end_sentinel: Memory overwrite guard
- */
-struct b2r2_core_job {
- u32 start_sentinel;
-
- /* Data to be filled in by client */
- int tag;
- int prio;
- u32 first_node_address;
- u32 last_node_address;
- void (*callback)(struct b2r2_core_job *);
- int (*acquire_resources)(struct b2r2_core_job *,
- bool atomic);
- void (*release_resources)(struct b2r2_core_job *,
- bool atomic);
- void (*release)(struct b2r2_core_job *);
-
- /* Output data, do not modify */
- int job_id;
- enum b2r2_core_job_state job_state;
- unsigned long jiffies;
-
- /* Data below is internal to b2r2_core, do not modify */
-
- /* Reference counting */
- u32 ref_count;
-
- /* Internal data */
- struct list_head list;
- wait_queue_head_t event;
- struct work_struct work;
-
- /* B2R2 HW data */
- enum b2r2_core_queue queue;
- u32 control;
- u32 pace_control;
- u32 interrupt_context;
-
- /* Timing data */
- u32 hw_start_time;
- s32 nsec_active_in_hw;
-
- u32 end_sentinel;
-};
/**
* b2r2_core_job_add() - Adds a job to B2R2 job queues
@@ -147,12 +27,14 @@ struct b2r2_core_job {
* release the reference. The job callback function will be always
* be called after the job is done or cancelled.
*
+ * @control: The b2r2 control entity
* @job: Job to be added
*
* Returns 0 if OK else negative error code
*
*/
-int b2r2_core_job_add(struct b2r2_core_job *job);
+int b2r2_core_job_add(struct b2r2_control *control,
+ struct b2r2_core_job *job);
/**
* b2r2_core_job_wait() - Waits for an added job to be done.
@@ -179,12 +61,14 @@ int b2r2_core_job_cancel(struct b2r2_core_job *job);
*
* Reference count will be increased for the found job
*
+ * @control: The b2r2 control entity
* @job_id: Job id to find
*
* Returns job if found, else NULL
*
*/
-struct b2r2_core_job *b2r2_core_job_find(int job_id);
+struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control,
+ int job_id);
/**
* b2r2_core_job_find_first_with_tag() - Finds first job with given tag
@@ -193,12 +77,14 @@ struct b2r2_core_job *b2r2_core_job_find(int job_id);
* This function can be used to find all jobs for a client, i.e.
* when cancelling all jobs for a client.
*
+ * @control: The b2r2 control entity
* @tag: Tag to find
*
* Returns job if found, else NULL
*
*/
-struct b2r2_core_job *b2r2_core_job_find_first_with_tag(int tag);
+struct b2r2_core_job *b2r2_core_job_find_first_with_tag(
+ struct b2r2_control *control, int tag);
/**
* b2r2_core_job_addref() - Increase the job reference count.
diff --git a/drivers/video/b2r2/b2r2_debug.c b/drivers/video/b2r2/b2r2_debug.c
index d4711cd3e28..23a0b1aa9ac 100644
--- a/drivers/video/b2r2/b2r2_debug.c
+++ b/drivers/video/b2r2/b2r2_debug.c
@@ -16,15 +16,11 @@
#include <linux/uaccess.h>
int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
-struct device *b2r2_log_dev;
-
-static struct dentry *root_dir;
static struct dentry *log_lvl_dir;
-static struct dentry *stats_dir;
+static int module_init;
#define CHARS_IN_NODE_DUMP 1544
-
-static const size_t dumped_node_size = CHARS_IN_NODE_DUMP * sizeof(char) + 1;
+#define DUMPED_NODE_SIZE (CHARS_IN_NODE_DUMP * sizeof(char) + 1)
static void dump_node(char *dst, struct b2r2_node *node)
{
@@ -175,11 +171,8 @@ static void dump_node(char *dst, struct b2r2_node *node)
}
-struct mutex last_job_lock;
-
-static struct b2r2_node *last_job;
-
-void b2r2_debug_job_done(struct b2r2_node *first_node)
+void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
{
struct b2r2_node *node = first_node;
struct b2r2_node **dst_node;
@@ -190,20 +183,20 @@ void b2r2_debug_job_done(struct b2r2_node *first_node)
node = node->next;
}
- mutex_lock(&last_job_lock);
+ mutex_lock(&cont->last_job_lock);
- if (last_job) {
- node = last_job;
+ if (cont->last_job) {
+ node = cont->last_job;
while (node != NULL) {
struct b2r2_node *tmp = node->next;
kfree(node);
node = tmp;
}
- last_job = NULL;
+ cont->last_job = NULL;
}
node = first_node;
- dst_node = &last_job;
+ dst_node = &cont->last_job;
while (node != NULL) {
*dst_node = kzalloc(sizeof(**dst_node), GFP_KERNEL);
if (!(*dst_node))
@@ -215,29 +208,27 @@ void b2r2_debug_job_done(struct b2r2_node *first_node)
node = node->next;
}
- mutex_unlock(&last_job_lock);
+ mutex_unlock(&cont->last_job_lock);
return;
last_job_alloc_failed:
- mutex_unlock(&last_job_lock);
+ mutex_unlock(&cont->last_job_lock);
- while (last_job != NULL) {
- struct b2r2_node *tmp = last_job->next;
- kfree(last_job);
- last_job = tmp;
+ while (cont->last_job != NULL) {
+ struct b2r2_node *tmp = cont->last_job->next;
+ kfree(cont->last_job);
+ cont->last_job = tmp;
}
return;
}
-static char *last_job_chars;
-static int prev_node_count;
-
-static ssize_t last_job_read(struct file *filep, char __user *buf,
+static ssize_t last_job_read(struct file *filp, char __user *buf,
size_t bytes, loff_t *off)
{
- struct b2r2_node *node = last_job;
+ struct b2r2_control *cont = filp->f_dentry->d_inode->i_private;
+ struct b2r2_node *node = cont->last_job;
int node_count = 0;
int i;
@@ -248,26 +239,27 @@ static ssize_t last_job_read(struct file *filep, char __user *buf,
for (; node != NULL; node = node->next)
node_count++;
- size = node_count * dumped_node_size;
+ size = node_count * DUMPED_NODE_SIZE;
- if (node_count != prev_node_count) {
- kfree(last_job_chars);
+ if (node_count != cont->prev_node_count) {
+ kfree(cont->last_job_chars);
- last_job_chars = kzalloc(size, GFP_KERNEL);
- if (!last_job_chars)
+ cont->last_job_chars = kzalloc(size, GFP_KERNEL);
+ if (!cont->last_job_chars)
return 0;
- prev_node_count = node_count;
+ cont->prev_node_count = node_count;
}
- mutex_lock(&last_job_lock);
- node = last_job;
+ mutex_lock(&cont->last_job_lock);
+ node = cont->last_job;
for (i = 0; i < node_count; i++) {
BUG_ON(node == NULL);
- dump_node(last_job_chars + i * dumped_node_size/sizeof(char),
- node);
+ dump_node(cont->last_job_chars +
+ i * DUMPED_NODE_SIZE/sizeof(char),
+ node);
node = node->next;
}
- mutex_unlock(&last_job_lock);
+ mutex_unlock(&cont->last_job_lock);
if (offs > size)
return 0;
@@ -277,7 +269,7 @@ static ssize_t last_job_read(struct file *filep, char __user *buf,
else
count = bytes;
- if (copy_to_user(buf, last_job_chars + offs, count))
+ if (copy_to_user(buf, cont->last_job_chars + offs, count))
return -EFAULT;
*off = offs + count;
@@ -288,48 +280,48 @@ static const struct file_operations last_job_fops = {
.read = last_job_read,
};
-int b2r2_debug_init(struct device *log_dev)
+int b2r2_debug_init(struct b2r2_control *cont)
{
int i;
- b2r2_log_dev = log_dev;
-
- for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++)
- b2r2_log_levels[i] = 0;
-
- root_dir = debugfs_create_dir("b2r2_debug", NULL);
- if (!root_dir) {
- b2r2_log_warn("%s: could not create root dir\n", __func__);
- return -ENODEV;
- }
+ if (!module_init) {
+ for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++)
+ b2r2_log_levels[i] = 0;
#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
- /*
- * If dynamic debug is disabled we need some other way to control the
- * log prints
- */
- log_lvl_dir = debugfs_create_dir("logs", root_dir);
-
- /* No need to save the files, they will be removed recursively */
- (void)debugfs_create_bool("warnings", 0644, log_lvl_dir,
- &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]);
- (void)debugfs_create_bool("info", 0644, log_lvl_dir,
- &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]);
- (void)debugfs_create_bool("debug", 0644, log_lvl_dir,
- &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]);
- (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir,
- &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]);
+ /*
+ * If dynamic debug is disabled we need some other way to
+ * control the log prints
+ */
+ log_lvl_dir = debugfs_create_dir("b2r2_log", NULL);
+
+ /* No need to save the files,
+ * they will be removed recursively */
+ (void)debugfs_create_bool("warnings", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]);
+ (void)debugfs_create_bool("info", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]);
+ (void)debugfs_create_bool("debug", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]);
+ (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]);
#elif defined(CONFIG_DYNAMIC_DEBUG)
- /* log_lvl_dir is never used */
- (void)log_lvl_dir;
+ /* log_lvl_dir is never used */
+ (void)log_lvl_dir;
#endif
+ module_init++;
+ }
- stats_dir = debugfs_create_dir("stats", root_dir);
- (void)debugfs_create_file("last_job", 0444, stats_dir, NULL,
- &last_job_fops);
+ if (cont->debugfs_debug_root_dir) {
+ /* No need to save the file,
+ * it will be removed recursively */
+ (void)debugfs_create_file("last_job", 0444,
+ cont->debugfs_debug_root_dir, cont,
+ &last_job_fops);
+ }
- mutex_init(&last_job_lock);
+ mutex_init(&cont->last_job_lock);
return 0;
}
@@ -337,7 +329,10 @@ int b2r2_debug_init(struct device *log_dev)
void b2r2_debug_exit(void)
{
#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
- if (root_dir)
- debugfs_remove_recursive(root_dir);
+ module_init--;
+ if (!module_init && log_lvl_dir) {
+ debugfs_remove_recursive(log_lvl_dir);
+ log_lvl_dir = NULL;
+ }
#endif
}
diff --git a/drivers/video/b2r2/b2r2_debug.h b/drivers/video/b2r2/b2r2_debug.h
index f87ca728482..1b1ac83f6cb 100644
--- a/drivers/video/b2r2/b2r2_debug.h
+++ b/drivers/video/b2r2/b2r2_debug.h
@@ -33,50 +33,47 @@ enum b2r2_log_levels {
*/
extern int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
-extern struct device *b2r2_log_dev;
-
-#define b2r2_log_err(...) do { \
+#define b2r2_log_err(b2r2_log_dev, ...) do { \
dev_err(b2r2_log_dev, __VA_ARGS__); \
} while (0)
/* If dynamic debug is enabled it should be used instead of loglevels */
#ifdef CONFIG_DYNAMIC_DEBUG
-# define b2r2_log_warn(...) do { \
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
dev_dbg(b2r2_log_dev, "WARN " __VA_ARGS__); \
} while (0)
-# define b2r2_log_info(...) do { \
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
dev_dbg(b2r2_log_dev, "INFO " __VA_ARGS__); \
} while (0)
-# define b2r2_log_debug(...) do { \
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
} while (0)
-# define b2r2_log_regdump(...) do { \
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
dev_dbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
} while (0)
#else
-# define b2r2_log_warn(...) do { \
+# define b2r2_log_warn(b2r2_log_dev, ...) do { \
if (b2r2_log_levels[B2R2_LOG_LEVEL_WARN]) \
dev_warn(b2r2_log_dev, "WARN " __VA_ARGS__); \
} while (0)
-# define b2r2_log_info(...) do { \
+# define b2r2_log_info(b2r2_log_dev, ...) do { \
if (b2r2_log_levels[B2R2_LOG_LEVEL_INFO]) \
dev_info(b2r2_log_dev, "INFO " __VA_ARGS__); \
} while (0)
-# define b2r2_log_debug(...) do { \
+# define b2r2_log_debug(b2r2_log_dev, ...) do { \
if (b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]) \
dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
} while (0)
-# define b2r2_log_regdump(...) do { \
+# define b2r2_log_regdump(b2r2_log_dev, ...) do { \
if (b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]) \
dev_vdbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
} while (0)
#endif
-
-int b2r2_debug_init(struct device *log_dev);
+int b2r2_debug_init(struct b2r2_control *cont);
void b2r2_debug_exit(void);
-
-void b2r2_debug_job_done(struct b2r2_node *node);
+void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *node);
#else
@@ -86,7 +83,7 @@ void b2r2_debug_job_done(struct b2r2_node *node);
#define b2r2_log_debug(...)
#define b2r2_log_regdump(...)
-static inline int b2r2_debug_init(struct device *log_dev)
+static inline int b2r2_debug_init(struct b2r2_control *cont)
{
return 0;
}
@@ -94,7 +91,8 @@ static inline void b2r2_debug_exit(void)
{
return;
}
-static inline void b2r2_debug_job_done(struct b2r2_node *node)
+static inline void b2r2_debug_job_done(struct b2r2_control *cont,
+ struct b2r2_node *node)
{
return;
}
diff --git a/drivers/video/b2r2/b2r2_filters.c b/drivers/video/b2r2/b2r2_filters.c
index 85cb697b522..a969816a9e7 100644
--- a/drivers/video/b2r2/b2r2_filters.c
+++ b/drivers/video/b2r2/b2r2_filters.c
@@ -236,49 +236,49 @@ static struct b2r2_filter_spec blur_filter = {
};
/* Private function declarations */
-static int alloc_filter_coeffs(struct b2r2_filter_spec *filter);
-static void free_filter_coeffs(struct b2r2_filter_spec *filter);
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter);
/* Public functions */
-static int filters_initialized;
-
-int b2r2_filters_init()
+int b2r2_filters_init(struct b2r2_control *cont)
{
int i;
- if (filters_initialized)
+ if (cont->filters_initialized)
return 0;
for (i = 0; i < filters_size; i++) {
- alloc_filter_coeffs(&filters[i]);
+ alloc_filter_coeffs(cont->dev, &filters[i]);
}
- alloc_filter_coeffs(&bilinear_filter);
- alloc_filter_coeffs(&default_downscale_filter);
- alloc_filter_coeffs(&blur_filter);
+ alloc_filter_coeffs(cont->dev, &bilinear_filter);
+ alloc_filter_coeffs(cont->dev, &default_downscale_filter);
+ alloc_filter_coeffs(cont->dev, &blur_filter);
- filters_initialized = 1;
+ cont->filters_initialized = 1;
return 0;
}
-void b2r2_filters_exit()
+void b2r2_filters_exit(struct b2r2_control *cont)
{
int i;
- if (!filters_initialized)
+ if (!cont->filters_initialized)
return;
for (i = 0; i < filters_size; i++) {
- free_filter_coeffs(&filters[i]);
+ free_filter_coeffs(cont->dev, &filters[i]);
}
- free_filter_coeffs(&bilinear_filter);
- free_filter_coeffs(&default_downscale_filter);
- free_filter_coeffs(&blur_filter);
+ free_filter_coeffs(cont->dev, &bilinear_filter);
+ free_filter_coeffs(cont->dev, &default_downscale_filter);
+ free_filter_coeffs(cont->dev, &blur_filter);
- filters_initialized = 0;
+ cont->filters_initialized = 0;
}
struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor)
@@ -323,11 +323,12 @@ struct b2r2_filter_spec *b2r2_filter_blur()
}
/* Private functions */
-static int alloc_filter_coeffs(struct b2r2_filter_spec *filter)
+static int alloc_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
{
int ret;
- filter->h_coeffs_dma_addr = dma_alloc_coherent(b2r2_blt_device(),
+ filter->h_coeffs_dma_addr = dma_alloc_coherent(dev,
B2R2_HF_TABLE_SIZE, &(filter->h_coeffs_phys_addr),
GFP_DMA | GFP_KERNEL);
if (filter->h_coeffs_dma_addr == NULL) {
@@ -335,7 +336,7 @@ static int alloc_filter_coeffs(struct b2r2_filter_spec *filter)
goto error;
}
- filter->v_coeffs_dma_addr = dma_alloc_coherent(b2r2_blt_device(),
+ filter->v_coeffs_dma_addr = dma_alloc_coherent(dev,
B2R2_VF_TABLE_SIZE, &(filter->v_coeffs_phys_addr),
GFP_DMA | GFP_KERNEL);
if (filter->v_coeffs_dma_addr == NULL) {
@@ -343,25 +344,28 @@ static int alloc_filter_coeffs(struct b2r2_filter_spec *filter)
goto error;
}
- memcpy(filter->h_coeffs_dma_addr, filter->h_coeffs, B2R2_HF_TABLE_SIZE);
- memcpy(filter->v_coeffs_dma_addr, filter->v_coeffs, B2R2_VF_TABLE_SIZE);
+ memcpy(filter->h_coeffs_dma_addr, filter->h_coeffs,
+ B2R2_HF_TABLE_SIZE);
+ memcpy(filter->v_coeffs_dma_addr, filter->v_coeffs,
+ B2R2_VF_TABLE_SIZE);
return 0;
error:
- free_filter_coeffs(filter);
+ free_filter_coeffs(dev, filter);
return ret;
}
-static void free_filter_coeffs(struct b2r2_filter_spec *filter)
+static void free_filter_coeffs(struct device *dev,
+ struct b2r2_filter_spec *filter)
{
if (filter->h_coeffs_dma_addr != NULL)
- dma_free_coherent(b2r2_blt_device(), B2R2_HF_TABLE_SIZE,
+ dma_free_coherent(dev, B2R2_HF_TABLE_SIZE,
filter->h_coeffs_dma_addr,
filter->h_coeffs_phys_addr);
if (filter->v_coeffs_dma_addr != NULL)
- dma_free_coherent(b2r2_blt_device(), B2R2_VF_TABLE_SIZE,
+ dma_free_coherent(dev, B2R2_VF_TABLE_SIZE,
filter->v_coeffs_dma_addr,
filter->v_coeffs_phys_addr);
diff --git a/drivers/video/b2r2/b2r2_filters.h b/drivers/video/b2r2/b2r2_filters.h
index 0eeefc6b0e0..790c9ec8ee9 100644
--- a/drivers/video/b2r2/b2r2_filters.h
+++ b/drivers/video/b2r2/b2r2_filters.h
@@ -13,6 +13,8 @@
#include <linux/kernel.h>
+#include "b2r2_internal.h"
+
#define B2R2_HF_TABLE_SIZE 64
#define B2R2_VF_TABLE_SIZE 40
@@ -45,12 +47,12 @@ struct b2r2_filter_spec {
/**
* b2r2_filters_init() - Initializes the B2R2 filters
*/
-int b2r2_filters_init(void);
+int b2r2_filters_init(struct b2r2_control *control);
/**
* b2r2_filters_exit() - De-initializes the B2R2 filters
*/
-void b2r2_filters_exit(void);
+void b2r2_filters_exit(struct b2r2_control *control);
/**
* b2r2_filter_find() - Find a filter matching the given scale factor
diff --git a/drivers/video/b2r2/b2r2_generic.c b/drivers/video/b2r2/b2r2_generic.c
index 5941e39be91..1a27adbaadf 100644
--- a/drivers/video/b2r2/b2r2_generic.c
+++ b/drivers/video/b2r2/b2r2_generic.c
@@ -37,9 +37,10 @@
/**
* reset_nodes() - clears the node list
*/
-static void reset_nodes(struct b2r2_node *node)
+static void reset_nodes(struct b2r2_control *cont,
+ struct b2r2_node *node)
{
- b2r2_log_info("%s ENTRY\n", __func__);
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
while (node != NULL) {
memset(&(node->node), 0, sizeof(node->node));
@@ -54,113 +55,115 @@ static void reset_nodes(struct b2r2_node *node)
node = node->next;
}
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
/**
* dump_nodes() - prints the node list
*/
-static void dump_nodes(struct b2r2_node *first, bool dump_all)
+static void dump_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first, bool dump_all)
{
struct b2r2_node *node = first;
- b2r2_log_info("%s ENTRY\n", __func__);
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
do {
- b2r2_log_debug("\nNODE START:\n=============\n");
- b2r2_log_debug("B2R2_ACK: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "\nNODE START:\n=============\n");
+ b2r2_log_debug(cont->dev, "B2R2_ACK: \t0x%.8x\n",
node->node.GROUP0.B2R2_ACK);
- b2r2_log_debug("B2R2_INS: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_INS: \t0x%.8x\n",
node->node.GROUP0.B2R2_INS);
- b2r2_log_debug("B2R2_CIC: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_CIC: \t0x%.8x\n",
node->node.GROUP0.B2R2_CIC);
- b2r2_log_debug("B2R2_NIP: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_NIP: \t0x%.8x\n",
node->node.GROUP0.B2R2_NIP);
- b2r2_log_debug("B2R2_TSZ: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_TSZ: \t0x%.8x\n",
node->node.GROUP1.B2R2_TSZ);
- b2r2_log_debug("B2R2_TXY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_TXY: \t0x%.8x\n",
node->node.GROUP1.B2R2_TXY);
- b2r2_log_debug("B2R2_TTY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_TTY: \t0x%.8x\n",
node->node.GROUP1.B2R2_TTY);
- b2r2_log_debug("B2R2_TBA: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_TBA: \t0x%.8x\n",
node->node.GROUP1.B2R2_TBA);
- b2r2_log_debug("B2R2_S2CF: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S2CF: \t0x%.8x\n",
node->node.GROUP2.B2R2_S2CF);
- b2r2_log_debug("B2R2_S1CF: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S1CF: \t0x%.8x\n",
node->node.GROUP2.B2R2_S1CF);
- b2r2_log_debug("B2R2_S1SZ: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S1SZ: \t0x%.8x\n",
node->node.GROUP3.B2R2_SSZ);
- b2r2_log_debug("B2R2_S1XY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S1XY: \t0x%.8x\n",
node->node.GROUP3.B2R2_SXY);
- b2r2_log_debug("B2R2_S1TY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S1TY: \t0x%.8x\n",
node->node.GROUP3.B2R2_STY);
- b2r2_log_debug("B2R2_S1BA: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S1BA: \t0x%.8x\n",
node->node.GROUP3.B2R2_SBA);
- b2r2_log_debug("B2R2_S2SZ: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S2SZ: \t0x%.8x\n",
node->node.GROUP4.B2R2_SSZ);
- b2r2_log_debug("B2R2_S2XY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S2XY: \t0x%.8x\n",
node->node.GROUP4.B2R2_SXY);
- b2r2_log_debug("B2R2_S2TY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S2TY: \t0x%.8x\n",
node->node.GROUP4.B2R2_STY);
- b2r2_log_debug("B2R2_S2BA: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S2BA: \t0x%.8x\n",
node->node.GROUP4.B2R2_SBA);
- b2r2_log_debug("B2R2_S3SZ: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S3SZ: \t0x%.8x\n",
node->node.GROUP5.B2R2_SSZ);
- b2r2_log_debug("B2R2_S3XY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S3XY: \t0x%.8x\n",
node->node.GROUP5.B2R2_SXY);
- b2r2_log_debug("B2R2_S3TY: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S3TY: \t0x%.8x\n",
node->node.GROUP5.B2R2_STY);
- b2r2_log_debug("B2R2_S3BA: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_S3BA: \t0x%.8x\n",
node->node.GROUP5.B2R2_SBA);
- b2r2_log_debug("B2R2_CWS: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_CWS: \t0x%.8x\n",
node->node.GROUP6.B2R2_CWS);
- b2r2_log_debug("B2R2_CWO: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_CWO: \t0x%.8x\n",
node->node.GROUP6.B2R2_CWO);
- b2r2_log_debug("B2R2_FCTL: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_FCTL: \t0x%.8x\n",
node->node.GROUP8.B2R2_FCTL);
- b2r2_log_debug("B2R2_RSF: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_RSF: \t0x%.8x\n",
node->node.GROUP9.B2R2_RSF);
- b2r2_log_debug("B2R2_RZI: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_RZI: \t0x%.8x\n",
node->node.GROUP9.B2R2_RZI);
- b2r2_log_debug("B2R2_HFP: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_HFP: \t0x%.8x\n",
node->node.GROUP9.B2R2_HFP);
- b2r2_log_debug("B2R2_VFP: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_VFP: \t0x%.8x\n",
node->node.GROUP9.B2R2_VFP);
- b2r2_log_debug("B2R2_LUMA_RSF: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RSF: \t0x%.8x\n",
node->node.GROUP10.B2R2_RSF);
- b2r2_log_debug("B2R2_LUMA_RZI: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_RZI: \t0x%.8x\n",
node->node.GROUP10.B2R2_RZI);
- b2r2_log_debug("B2R2_LUMA_HFP: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_HFP: \t0x%.8x\n",
node->node.GROUP10.B2R2_HFP);
- b2r2_log_debug("B2R2_LUMA_VFP: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_LUMA_VFP: \t0x%.8x\n",
node->node.GROUP10.B2R2_VFP);
- b2r2_log_debug("B2R2_IVMX0: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_IVMX0: \t0x%.8x\n",
node->node.GROUP15.B2R2_VMX0);
- b2r2_log_debug("B2R2_IVMX1: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_IVMX1: \t0x%.8x\n",
node->node.GROUP15.B2R2_VMX1);
- b2r2_log_debug("B2R2_IVMX2: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_IVMX2: \t0x%.8x\n",
node->node.GROUP15.B2R2_VMX2);
- b2r2_log_debug("B2R2_IVMX3: \t0x%.8x\n",
+ b2r2_log_debug(cont->dev, "B2R2_IVMX3: \t0x%.8x\n",
node->node.GROUP15.B2R2_VMX3);
- b2r2_log_debug("\n=============\nNODE END\n");
+ b2r2_log_debug(cont->dev, "\n=============\nNODE END\n");
node = node->next;
} while (node != NULL && dump_all);
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
/**
* to_native_fmt() - returns the native B2R2 format
*/
-static inline enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt)
+static inline enum b2r2_native_fmt to_native_fmt(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
{
switch (fmt) {
@@ -216,7 +219,8 @@ static inline enum b2r2_native_fmt to_native_fmt(enum b2r2_blt_fmt fmt)
/**
* get_alpha_range() - returns the alpha range of the given format
*/
-static inline enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt)
+static inline enum b2r2_ty get_alpha_range(struct b2r2_control *cont,
+ enum b2r2_blt_fmt fmt)
{
switch (fmt) {
case B2R2_BLT_FMT_24_BIT_ARGB8565:
@@ -234,7 +238,8 @@ static inline enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt)
return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */
}
-static unsigned int get_pitch(enum b2r2_blt_fmt format, u32 width)
+static unsigned int get_pitch(struct b2r2_control *cont,
+ enum b2r2_blt_fmt format, u32 width)
{
switch (format) {
case B2R2_BLT_FMT_1_BIT_A1: {
@@ -269,7 +274,7 @@ static unsigned int get_pitch(enum b2r2_blt_fmt format, u32 width)
case B2R2_BLT_FMT_CB_Y_CR_Y:
/* width of the buffer must be a multiple of 4 */
if (width & 3) {
- b2r2_log_warn("%s: Illegal width "
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
"for fmt=%#010x width=%d\n", __func__,
format, width);
return 0;
@@ -290,7 +295,7 @@ static unsigned int get_pitch(enum b2r2_blt_fmt format, u32 width)
case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
/* width of the buffer must be a multiple of 2 */
if (width & 1) {
- b2r2_log_warn("%s: Illegal width "
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
"for fmt=%#010x width=%d\n", __func__,
format, width);
return 0;
@@ -305,7 +310,7 @@ static unsigned int get_pitch(enum b2r2_blt_fmt format, u32 width)
case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
/* width of the buffer must be a multiple of 16. */
if (width & 15) {
- b2r2_log_warn("%s: Illegal width "
+ b2r2_log_warn(cont->dev, "%s: Illegal width "
"for fmt=%#010x width=%d\n", __func__,
format, width);
return 0;
@@ -317,35 +322,36 @@ static unsigned int get_pitch(enum b2r2_blt_fmt format, u32 width)
return width;
break;
default:
- b2r2_log_warn("%s: Unable to determine pitch "
+ b2r2_log_warn(cont->dev, "%s: Unable to determine pitch "
"for fmt=%#010x width=%d\n", __func__,
format, width);
return 0;
}
}
-static s32 validate_buf(const struct b2r2_blt_img *image,
- const struct b2r2_resolved_buf *buf)
+static s32 validate_buf(struct b2r2_control *cont,
+ const struct b2r2_blt_img *image,
+ const struct b2r2_resolved_buf *buf)
{
u32 expect_buf_size;
u32 pitch;
if (image->width <= 0 || image->height <= 0) {
- b2r2_log_warn("%s: width=%d or height=%d negative.\n", __func__,
- image->width, image->height);
+ b2r2_log_warn(cont->dev, "%s: width=%d or height=%d negative"
+ ".\n", __func__, image->width, image->height);
return -EINVAL;
}
if (image->pitch == 0) {
/* autodetect pitch based on format and width */
- pitch = get_pitch(image->fmt, image->width);
+ pitch = get_pitch(cont, image->fmt, image->width);
} else
pitch = image->pitch;
expect_buf_size = pitch * image->height;
if (pitch == 0) {
- b2r2_log_warn("%s: Unable to detect pitch. "
+ b2r2_log_warn(cont->dev, "%s: Unable to detect pitch. "
"fmt=%#010x, width=%d\n",
__func__,
image->fmt, image->width);
@@ -393,7 +399,7 @@ static s32 validate_buf(const struct b2r2_blt_img *image,
case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
/* Height must be a multiple of 16 for macro-block format.*/
if (image->height & 15) {
- b2r2_log_warn("%s: Illegal height "
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
"for fmt=%#010x height=%d\n", __func__,
image->fmt, image->height);
return -EINVAL;
@@ -403,7 +409,7 @@ static s32 validate_buf(const struct b2r2_blt_img *image,
case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
/* Height must be a multiple of 16 for macro-block format.*/
if (image->height & 15) {
- b2r2_log_warn("%s: Illegal height "
+ b2r2_log_warn(cont->dev, "%s: Illegal height "
"for fmt=%#010x height=%d\n", __func__,
image->fmt, image->height);
return -EINVAL;
@@ -415,7 +421,7 @@ static s32 validate_buf(const struct b2r2_blt_img *image,
}
if (buf->file_len < expect_buf_size) {
- b2r2_log_warn("%s: Invalid buffer size:\n"
+ b2r2_log_warn(cont->dev, "%s: Invalid buffer size:\n"
"fmt=%#010x w=%d h=%d buf.len=%d expect_buf_size=%d\n",
__func__,
image->fmt, image->width, image->height, buf->file_len,
@@ -424,8 +430,8 @@ static s32 validate_buf(const struct b2r2_blt_img *image,
}
if (image->buf.type == B2R2_BLT_PTR_VIRTUAL) {
- b2r2_log_warn("%s: Virtual pointers not supported yet.\n",
- __func__);
+ b2r2_log_warn(cont->dev, "%s: Virtual pointers not supported"
+ " yet.\n", __func__);
return -EINVAL;
}
return 0;
@@ -435,7 +441,8 @@ static s32 validate_buf(const struct b2r2_blt_img *image,
* Bit-expand the color from fmt to RGB888 with blue at LSB.
* Copy MSBs into missing LSBs.
*/
-static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt)
+static u32 to_RGB888(struct b2r2_control *cont, u32 color,
+ const enum b2r2_blt_fmt fmt)
{
u32 out_color = 0;
u32 r = 0;
@@ -491,7 +498,9 @@ static void setup_fill_input_stage(const struct b2r2_blt_request *req,
enum b2r2_native_fmt fill_fmt = 0;
u32 src_color = req->user_req.src_color;
const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
- b2r2_log_info("%s ENTRY\n", __func__);
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
/* Determine format in src_color */
switch (dst_img->fmt) {
@@ -509,7 +518,7 @@ static void setup_fill_input_stage(const struct b2r2_blt_request *req,
fill_fmt = B2R2_NATIVE_ARGB8888;
} else {
/* SOURCE_FILL_RAW */
- fill_fmt = to_native_fmt(dst_img->fmt);
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
/*
* Color is read from a register,
@@ -606,7 +615,7 @@ static void setup_fill_input_stage(const struct b2r2_blt_request *req,
*/
fill_fmt = B2R2_NATIVE_AYCBCR8888;
} else {
- fill_fmt = to_native_fmt(dst_img->fmt);
+ fill_fmt = to_native_fmt(cont, dst_img->fmt);
}
switch (dst_img->fmt) {
@@ -704,7 +713,7 @@ static void setup_fill_input_stage(const struct b2r2_blt_request *req,
node->node.GROUP4.B2R2_STY =
(0 << B2R2_TY_BITMAP_PITCH_SHIFT) |
fill_fmt |
- get_alpha_range(dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
B2R2_TY_HSO_LEFT_TO_RIGHT |
B2R2_TY_VSO_TOP_TO_BOTTOM;
@@ -714,7 +723,7 @@ static void setup_fill_input_stage(const struct b2r2_blt_request *req,
node->node.GROUP2.B2R2_S2CF = src_color;
node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
static void setup_input_stage(const struct b2r2_blt_request *req,
@@ -756,23 +765,25 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
bool use_h_filter = false;
bool use_v_filter = false;
- b2r2_log_info("%s ENTRY\n", __func__);
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
if (((B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW) &
req->user_req.flags) != 0) {
setup_fill_input_stage(req, node, out_buf);
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
return;
}
if (src_img->pitch == 0) {
/* Determine pitch based on format and width of the image. */
- src_pitch = get_pitch(src_img->fmt, src_img->width);
+ src_pitch = get_pitch(cont, src_img->fmt, src_img->width);
} else {
src_pitch = src_img->pitch;
}
- b2r2_log_info("%s transform=%#010x\n",
+ b2r2_log_info(cont->dev, "%s transform=%#010x\n",
__func__, req->user_req.transform);
if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
h_scf = (src_rect->width << 10) / dst_rect->height;
@@ -798,7 +809,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
/* Configure horizontal rescale */
if (h_scf != (1 << 10)) {
- b2r2_log_info("%s: Scaling horizontally by 0x%.8x"
+ b2r2_log_info(cont->dev, "%s: Scaling horizontally by 0x%.8x"
"\ns(%d, %d)->d(%d, %d)\n", __func__,
h_scf, src_rect->width, src_rect->height,
dst_rect->width, dst_rect->height);
@@ -810,7 +821,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
/* Configure vertical rescale */
if (v_scf != (1 << 10)) {
- b2r2_log_info("%s: Scaling vertically by 0x%.8x"
+ b2r2_log_info(cont->dev, "%s: Scaling vertically by 0x%.8x"
"\ns(%d, %d)->d(%d, %d)\n", __func__,
v_scf, src_rect->width, src_rect->height,
dst_rect->width, dst_rect->height);
@@ -1095,7 +1106,8 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
bool swapped_chroma =
src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
- enum b2r2_native_fmt src_fmt = to_native_fmt(src_img->fmt);
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
if (swapped_chroma)
cr_addr = req->src_resolved.physical_address +
@@ -1173,7 +1185,8 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
src_pitch * src_img->height;
u32 chroma_pitch = src_pitch;
- enum b2r2_native_fmt src_fmt = to_native_fmt(src_img->fmt);
+ enum b2r2_native_fmt src_fmt =
+ to_native_fmt(cont, src_img->fmt);
node->node.GROUP4.B2R2_SBA = chroma_addr;
node->node.GROUP4.B2R2_STY =
@@ -1199,8 +1212,8 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
node->node.GROUP4.B2R2_SBA = req->src_resolved.physical_address;
node->node.GROUP4.B2R2_STY =
(src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
- to_native_fmt(src_img->fmt) |
- get_alpha_range(src_img->fmt) |
+ to_native_fmt(cont, src_img->fmt) |
+ get_alpha_range(cont, src_img->fmt) |
B2R2_TY_HSO_LEFT_TO_RIGHT |
B2R2_TY_VSO_TOP_TO_BOTTOM |
endianness;
@@ -1220,7 +1233,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
static void setup_transform_stage(const struct b2r2_blt_request *req,
@@ -1231,8 +1244,11 @@ static void setup_transform_stage(const struct b2r2_blt_request *req,
/* vertical scan order for out_buf */
enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
enum b2r2_blt_transform transform = req->user_req.transform;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
- b2r2_log_info("%s ENTRY\n", __func__);
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
/*
@@ -1265,7 +1281,7 @@ static void setup_transform_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
/*
@@ -1299,14 +1315,16 @@ static void setup_dst_read_stage(const struct b2r2_blt_request *req,
dst_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;
u32 dst_pitch = 0;
+ struct b2r2_control *cont = req->instance->control;
+
if (dst_img->pitch == 0) {
/* Determine pitch based on format and width of the image. */
- dst_pitch = get_pitch(dst_img->fmt, dst_img->width);
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
} else {
dst_pitch = dst_img->pitch;
}
- b2r2_log_info("%s ENTRY\n", __func__);
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
/* Adjustments that depend on the destination format */
switch (dst_img->fmt) {
@@ -1497,7 +1515,7 @@ static void setup_dst_read_stage(const struct b2r2_blt_request *req,
dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
enum b2r2_native_fmt dst_native_fmt =
- to_native_fmt(dst_img->fmt);
+ to_native_fmt(cont, dst_img->fmt);
if (swapped_chroma)
cr_addr = req->dst_resolved.physical_address +
@@ -1573,7 +1591,7 @@ static void setup_dst_read_stage(const struct b2r2_blt_request *req,
u32 chroma_pitch = dst_pitch;
enum b2r2_native_fmt dst_native_fmt =
- to_native_fmt(dst_img->fmt);
+ to_native_fmt(cont, dst_img->fmt);
node->node.GROUP4.B2R2_SBA = chroma_addr;
node->node.GROUP4.B2R2_STY =
@@ -1599,8 +1617,8 @@ static void setup_dst_read_stage(const struct b2r2_blt_request *req,
node->node.GROUP4.B2R2_SBA = req->dst_resolved.physical_address;
node->node.GROUP4.B2R2_STY =
(dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
- to_native_fmt(dst_img->fmt) |
- get_alpha_range(dst_img->fmt) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
B2R2_TY_HSO_LEFT_TO_RIGHT |
B2R2_TY_VSO_TOP_TO_BOTTOM |
endianness;
@@ -1612,7 +1630,7 @@ static void setup_dst_read_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
static void setup_blend_stage(const struct b2r2_blt_request *req,
@@ -1621,7 +1639,11 @@ static void setup_blend_stage(const struct b2r2_blt_request *req,
struct b2r2_work_buf *fg_buf)
{
u32 global_alpha = req->user_req.global_alpha;
- b2r2_log_info("%s ENTRY\n", __func__);
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
node->node.GROUP0.B2R2_ACK = 0;
@@ -1723,7 +1745,7 @@ static void setup_blend_stage(const struct b2r2_blt_request *req,
B2R2_TY_VSO_TOP_TO_BOTTOM;
}
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
static void setup_writeback_stage(const struct b2r2_blt_request *req,
@@ -1758,11 +1780,13 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
u32 dst_pitch = 0;
u32 endianness = 0;
- b2r2_log_info("%s ENTRY\n", __func__);
+ struct b2r2_control *cont = req->instance->control;
+
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
if (dst_img->pitch == 0) {
/* Determine pitch based on format and width of the image. */
- dst_pitch = get_pitch(dst_img->fmt, dst_img->width);
+ dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
} else
dst_pitch = dst_img->pitch;
@@ -1793,8 +1817,8 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
enum b2r2_native_fmt dst_native_fmt =
- to_native_fmt(dst_img->fmt);
- enum b2r2_ty alpha_range = get_alpha_range(dst_img->fmt);
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
if (swapped_chroma)
cr_addr = req->dst_resolved.physical_address +
@@ -1980,8 +2004,8 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
dst_pitch * dst_img->height;
u32 chroma_pitch = dst_pitch;
enum b2r2_native_fmt dst_native_fmt =
- to_native_fmt(dst_img->fmt);
- enum b2r2_ty alpha_range = get_alpha_range(dst_img->fmt);
+ to_native_fmt(cont, dst_img->fmt);
+ enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt);
if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
dst_fmt ==
@@ -2154,8 +2178,8 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address;
node->node.GROUP1.B2R2_TTY =
(dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
- to_native_fmt(dst_img->fmt) |
- get_alpha_range(dst_img->fmt) |
+ to_native_fmt(cont, dst_img->fmt) |
+ get_alpha_range(cont, dst_img->fmt) |
B2R2_TY_HSO_LEFT_TO_RIGHT |
B2R2_TY_VSO_TOP_TO_BOTTOM |
dst_dither |
@@ -2179,7 +2203,7 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED;
node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY;
- key_color = to_RGB888(req->user_req.src_color,
+ key_color = to_RGB888(cont, req->user_req.src_color,
req->user_req.src_img.fmt);
node->node.GROUP12.B2R2_KEY1 = key_color;
node->node.GROUP12.B2R2_KEY2 = key_color;
@@ -2195,20 +2219,20 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
*/
node->node.GROUP0.B2R2_NIP = 0;
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
/*
* Public functions
*/
-void b2r2_generic_init()
+void b2r2_generic_init(struct b2r2_control *cont)
{
- b2r2_filters_init();
+
}
-void b2r2_generic_exit(void)
+void b2r2_generic_exit(struct b2r2_control *cont)
{
- b2r2_filters_exit();
+
}
int b2r2_generic_analyze(const struct b2r2_blt_request *req,
@@ -2230,13 +2254,13 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
bool is_src_fill = false;
bool yuv_planar_dst;
bool yuv_semi_planar_dst;
-
struct b2r2_blt_rect src_rect;
struct b2r2_blt_rect dst_rect;
+ struct b2r2_control *cont = req->instance->control;
if (req == NULL || work_buf_width == NULL || work_buf_height == NULL ||
work_buf_count == NULL || node_count == NULL) {
- b2r2_log_warn("%s: Invalid in or out pointers:\n"
+ b2r2_log_warn(cont->dev, "%s: Invalid in or out pointers:\n"
"req=0x%p\n"
"work_buf_width=0x%p work_buf_height=0x%p "
"work_buf_count=0x%p\n"
@@ -2281,7 +2305,8 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if ((yuv_planar_dst || yuv_semi_planar_dst) &&
(req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)) {
- b2r2_log_warn("%s: Invalid combination: source_fill_raw"
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source_fill_raw"
" and multi-buffer destination.\n",
__func__);
return -EINVAL;
@@ -2289,7 +2314,8 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) != 0 &&
(req->user_req.flags & B2R2_BLT_FLAG_DEST_COLOR_KEY)) {
- b2r2_log_warn("%s: Invalid combination: source and "
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid combination: source and "
"destination color keying.\n", __func__);
return -EINVAL;
}
@@ -2300,7 +2326,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
(req->user_req.flags &
(B2R2_BLT_FLAG_SOURCE_COLOR_KEY |
B2R2_BLT_FLAG_DEST_COLOR_KEY))) {
- b2r2_log_warn("%s: Invalid combination: "
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
"source_fill and color keying.\n",
__func__);
return -EINVAL;
@@ -2312,7 +2338,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
(req->user_req.flags &
(B2R2_BLT_FLAG_DEST_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
- b2r2_log_warn("%s: Invalid combination: "
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: "
"blending and color keying.\n",
__func__);
return -EINVAL;
@@ -2322,8 +2348,8 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
(req->user_req.flags &
(B2R2_BLT_FLAG_DEST_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
- b2r2_log_warn("%s: Invalid combination: source mask and "
- "color keying.\n",
+ b2r2_log_warn(cont->dev, "%s: Invalid combination: source mask"
+ "and color keying.\n",
__func__);
return -EINVAL;
}
@@ -2331,7 +2357,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if (req->user_req.flags &
(B2R2_BLT_FLAG_DEST_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_MASK)) {
- b2r2_log_warn("%s: Unsupported: source mask, "
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, "
"destination color keying.\n",
__func__);
return -ENOSYS;
@@ -2355,9 +2381,9 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;
if (yuv_src || src_fmt == B2R2_BLT_FMT_1_BIT_A1 ||
src_fmt == B2R2_BLT_FMT_8_BIT_A8) {
- b2r2_log_warn("%s: Unsupported: source color keying "
- "with YUV or pure alpha formats.\n",
- __func__);
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source "
+ "color keying with YUV or pure alpha "
+ "formats.\n", __func__);
return -ENOSYS;
}
}
@@ -2369,7 +2395,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if (!is_src_fill && (src_rect.x < 0 || src_rect.y < 0 ||
src_rect.x + src_rect.width > req->user_req.src_img.width ||
src_rect.y + src_rect.height > req->user_req.src_img.height)) {
- b2r2_log_warn("%s: src_rect outside src_img:\n"
+ b2r2_log_warn(cont->dev, "%s: src_rect outside src_img:\n"
"src(x,y,w,h)=(%d, %d, %d, %d) "
"src_img(w,h)=(%d, %d).\n",
__func__,
@@ -2380,7 +2406,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
}
if (!is_src_fill && (src_rect.width <= 0 || src_rect.height <= 0)) {
- b2r2_log_warn("%s: Invalid source dimensions:\n"
+ b2r2_log_warn(cont->dev, "%s: Invalid source dimensions:\n"
"src(w,h)=(%d, %d).\n",
__func__,
src_rect.width, src_rect.height);
@@ -2388,7 +2414,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
}
if (dst_rect.width <= 0 || dst_rect.height <= 0) {
- b2r2_log_warn("%s: Invalid dest dimensions:\n"
+ b2r2_log_warn(cont->dev, "%s: Invalid dest dimensions:\n"
"dst(w,h)=(%d, %d).\n",
__func__,
dst_rect.width, dst_rect.height);
@@ -2397,18 +2423,18 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
req->user_req.clut == NULL) {
- b2r2_log_warn("%s: Invalid request: no table specified "
- "for CLUT color correction.\n",
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
__func__);
return -EINVAL;
}
/* Check for invalid image params */
- if (!is_src_fill && validate_buf(&(req->user_req.src_img),
+ if (!is_src_fill && validate_buf(cont, &(req->user_req.src_img),
&(req->src_resolved)))
return -EINVAL;
- if (validate_buf(&(req->user_req.dst_img), &(req->dst_resolved)))
+ if (validate_buf(cont, &(req->user_req.dst_img), &(req->dst_resolved)))
return -EINVAL;
if (is_src_fill) {
@@ -2425,9 +2451,8 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
*work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
*work_buf_count = n_work_bufs;
*node_count = n_nodes;
- b2r2_log_info("%s DONE buf_w=%d buf_h=%d buf_count=%d "
- "node_count=%d\n",
- __func__,
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d "
+ "buf_count=%d node_count=%d\n", __func__,
*work_buf_width, *work_buf_height,
*work_buf_count, *node_count);
return 0;
@@ -2447,7 +2472,8 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
/* Check for degenerate/out_of_range scaling factors. */
if (h_scf <= 0 || v_scf <= 0 || h_scf > 0x7C00 || v_scf > 0x7C00) {
- b2r2_log_warn("%s: Dimensions result in degenerate or "
+ b2r2_log_warn(cont->dev,
+ "%s: Dimensions result in degenerate or "
"out of range scaling:\n"
"src(w,h)=(%d, %d) "
"dst(w,h)=(%d,%d).\n"
@@ -2468,10 +2494,9 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
*work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT;
*work_buf_count = n_work_bufs;
*node_count = n_nodes;
- b2r2_log_info("%s DONE buf_w=%d buf_h=%d buf_count=%d node_count=%d\n",
- __func__,
- *work_buf_width, *work_buf_height, *work_buf_count,
- *node_count);
+ b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d buf_count=%d "
+ "node_count=%d\n", __func__, *work_buf_width,
+ *work_buf_height, *work_buf_count, *node_count);
return 0;
}
@@ -2487,6 +2512,7 @@ int b2r2_generic_configure(const struct b2r2_blt_request *req,
struct b2r2_work_buf *in_buf = NULL;
struct b2r2_work_buf *out_buf = NULL;
struct b2r2_work_buf *empty_buf = NULL;
+ struct b2r2_control *cont = req->instance->control;
#ifdef B2R2_GENERIC_DEBUG
u32 needed_bufs = 0;
@@ -2498,7 +2524,8 @@ int b2r2_generic_configure(const struct b2r2_blt_request *req,
&work_buf_height, &needed_bufs,
&needed_nodes);
if (invalid_req < 0) {
- b2r2_log_warn("%s: Invalid request supplied, ec=%d\n",
+ b2r2_log_warn(cont->dev,
+ "%s: Invalid request supplied, ec=%d\n",
__func__, invalid_req);
return -EINVAL;
}
@@ -2510,20 +2537,20 @@ int b2r2_generic_configure(const struct b2r2_blt_request *req,
node = node->next;
}
if (n_nodes < needed_nodes) {
- b2r2_log_warn("%s: Not enough nodes %d < %d.\n",
+ b2r2_log_warn(cont->dev, "%s: Not enough nodes %d < %d.\n",
__func__, n_nodes, needed_nodes);
return -EINVAL;
}
if (buf_count < needed_bufs) {
- b2r2_log_warn("%s: Not enough buffers %d < %d.\n",
+ b2r2_log_warn(cont->dev, "%s: Not enough buffers %d < %d.\n",
__func__, buf_count, needed_bufs);
return -EINVAL;
}
#endif
- reset_nodes(first);
+ reset_nodes(cont, first);
node = first;
empty_buf = tmp_bufs;
out_buf = empty_buf;
@@ -2611,8 +2638,9 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
/* Dst coords inside the dst_rect, not the buffer */
s32 dst_x = dst_rect_area->x;
s32 dst_y = dst_rect_area->y;
+ struct b2r2_control *cont = req->instance->control;
- b2r2_log_info("%s ENTRY\n", __func__);
+ b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);
if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
h_scf = (src_rect->width << 10) / dst_rect->height;
@@ -2907,8 +2935,8 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s Input node done.\n", __func__);
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Input node done.\n", __func__);
}
/* Transform */
@@ -2945,8 +2973,9 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s Tranform node done.\n", __func__);
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Tranform node done.\n", __func__);
}
}
@@ -2962,8 +2991,9 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
*/
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s Source mask node done.\n", __func__);
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Source mask node done.\n", __func__);
}
}
@@ -3079,8 +3109,8 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s dst_read node done.\n", __func__);
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s dst_read node done.\n", __func__);
}
/* blend */
@@ -3102,8 +3132,8 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s Blend node done.\n", __func__);
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Blend node done.\n", __func__);
}
/* writeback */
@@ -3177,9 +3207,9 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s Writeback luma node done.\n",
- __func__);
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev,
+ "%s Writeback luma node done.\n", __func__);
}
node = node->next;
@@ -3268,9 +3298,9 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s Writeback chroma node "
- "%d of %d done.\n",
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback chroma "
+ "node %d of %d done.\n",
__func__, i + 1, n_nodes);
}
@@ -3294,10 +3324,11 @@ void b2r2_generic_set_areas(const struct b2r2_blt_request *req,
if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 &&
dst_rect_area->y == 0) {
- dump_nodes(node, false);
- b2r2_log_debug("%s Writeback node done.\n", __func__);
+ dump_nodes(cont, node, false);
+ b2r2_log_debug(cont->dev, "%s Writeback node done.\n",
+ __func__);
}
}
- b2r2_log_info("%s DONE\n", __func__);
+ b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}
diff --git a/drivers/video/b2r2/b2r2_generic.h b/drivers/video/b2r2/b2r2_generic.h
index 35451543c5c..3b22f654deb 100644
--- a/drivers/video/b2r2/b2r2_generic.h
+++ b/drivers/video/b2r2/b2r2_generic.h
@@ -20,12 +20,12 @@
/**
* b2r2_generic_init()
*/
-void b2r2_generic_init(void);
+void b2r2_generic_init(struct b2r2_control *cont);
/**
* b2r2_generic_exit()
*/
-void b2r2_generic_exit(void);
+void b2r2_generic_exit(struct b2r2_control *cont);
/**
* b2r2_generic_analyze()
diff --git a/drivers/video/b2r2/b2r2_input_validation.c b/drivers/video/b2r2/b2r2_input_validation.c
index 602041c9294..ac8b5728847 100644
--- a/drivers/video/b2r2/b2r2_input_validation.c
+++ b/drivers/video/b2r2/b2r2_input_validation.c
@@ -19,8 +19,8 @@
*/
+#include "b2r2_internal.h"
#include "b2r2_input_validation.h"
-
#include "b2r2_debug.h"
#include "b2r2_utils.h"
@@ -32,16 +32,18 @@
static bool is_valid_format(enum b2r2_blt_fmt fmt);
static bool is_valid_bg_format(enum b2r2_blt_fmt fmt);
-static bool is_valid_pitch_for_fmt(u32 pitch, s32 width,
- enum b2r2_blt_fmt fmt);
+static bool is_valid_pitch_for_fmt(struct b2r2_control *cont,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt);
static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt);
static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt);
static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt);
-static bool validate_img(struct b2r2_blt_img *img);
-static bool validate_rect(struct b2r2_blt_rect *rect);
+static bool validate_img(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+static bool validate_rect(struct b2r2_control *cont,
+ struct b2r2_blt_rect *rect);
static bool is_valid_format(enum b2r2_blt_fmt fmt)
@@ -101,15 +103,16 @@ static bool is_valid_bg_format(enum b2r2_blt_fmt fmt)
}
-static bool is_valid_pitch_for_fmt(u32 pitch, s32 width, enum b2r2_blt_fmt fmt)
+static bool is_valid_pitch_for_fmt(struct b2r2_control *cont,
+ u32 pitch, s32 width, enum b2r2_blt_fmt fmt)
{
s32 complete_width;
u32 pitch_derived_from_width;
complete_width = width_2_complete_width(width, fmt);
- pitch_derived_from_width =
- b2r2_calc_pitch_from_width(complete_width, fmt);
+ pitch_derived_from_width = b2r2_calc_pitch_from_width(cont,
+ complete_width, fmt);
if (pitch < pitch_derived_from_width)
return false;
@@ -260,7 +263,8 @@ static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt)
return true;
}
-static bool validate_img(struct b2r2_blt_img *img)
+static bool validate_img(struct b2r2_control *cont,
+ struct b2r2_blt_img *img)
{
/*
* So that we always can do width * height * bpp without overflowing a
@@ -272,13 +276,14 @@ static bool validate_img(struct b2r2_blt_img *img)
s32 img_size;
if (!is_valid_format(img->fmt)) {
- b2r2_log_info("Validation Error: !is_valid_format(img->fmt)\n");
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_valid_format(img->fmt)\n");
return false;
}
if (img->width < 0 || img->width > max_img_width_height ||
img->height < 0 || img->height > max_img_width_height) {
- b2r2_log_info("Validation Error: "
+ b2r2_log_info(cont->dev, "Validation Error: "
"img->width < 0 || "
"img->width > max_img_width_height || "
"img->height < 0 || "
@@ -288,7 +293,7 @@ static bool validate_img(struct b2r2_blt_img *img)
if (b2r2_is_mb_fmt(img->fmt)) {
if (!is_complete_width_for_fmt(img->width, img->fmt)) {
- b2r2_log_info("Validation Error: "
+ b2r2_log_info(cont->dev, "Validation Error: "
"!is_complete_width_for_fmt(img->width,"
" img->fmt)\n");
return false;
@@ -297,7 +302,8 @@ static bool validate_img(struct b2r2_blt_img *img)
if (0 == img->pitch &&
(!is_aligned_width_for_fmt(img->width, img->fmt) ||
!is_complete_width_for_fmt(img->width, img->fmt))) {
- b2r2_log_info("Validation Error: "
+ b2r2_log_info(cont->dev,
+ "Validation Error: "
"0 == img->pitch && "
"(!is_aligned_width_for_fmt(img->width,"
" img->fmt) || "
@@ -307,24 +313,24 @@ static bool validate_img(struct b2r2_blt_img *img)
}
if (img->pitch != 0 &&
- !is_valid_pitch_for_fmt(img->pitch, img->width,
- img->fmt)) {
- b2r2_log_info("Validation Error: "
- "img->pitch != 0 && "
- "!is_valid_pitch_for_fmt(img->pitch, "
- "img->width, img->fmt)\n");
+ !is_valid_pitch_for_fmt(cont, img->pitch, img->width,
+ img->fmt)) {
+ b2r2_log_info(cont->dev,
+ "Validation Error: "
+ "img->pitch != 0 && "
+ "!is_valid_pitch_for_fmt(cont, "
+ "img->pitch, img->width, img->fmt)\n");
return false;
}
}
if (!is_valid_height_for_fmt(img->width, img->fmt)) {
- b2r2_log_info("Validation Error: "
- "!is_valid_height_for_fmt(img->width, "
- "img->fmt)\n");
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_valid_height_for_fmt(img->width, img->fmt)\n");
return false;
}
- img_size = b2r2_get_img_size(img);
+ img_size = b2r2_get_img_size(cont, img);
/*
* To keep the entire image inside s32 range.
@@ -332,7 +338,7 @@ static bool validate_img(struct b2r2_blt_img *img)
if ((B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type ||
B2R2_BLT_PTR_FD_OFFSET == img->buf.type) &&
img->buf.offset > (u32)b2r2_s32_max - (u32)img_size) {
- b2r2_log_info("Validation Error: "
+ b2r2_log_info(cont->dev, "Validation Error: "
"(B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == "
"img->buf.type || B2R2_BLT_PTR_FD_OFFSET == "
"img->buf.type) && img->buf.offset > "
@@ -343,10 +349,11 @@ static bool validate_img(struct b2r2_blt_img *img)
return true;
}
-static bool validate_rect(struct b2r2_blt_rect *rect)
+static bool validate_rect(struct b2r2_control *cont,
+ struct b2r2_blt_rect *rect)
{
if (rect->width < 0 || rect->height < 0) {
- b2r2_log_info("Validation Error: "
+ b2r2_log_info(cont->dev, "Validation Error: "
"rect->width < 0 || rect->height < 0\n");
return false;
}
@@ -354,7 +361,8 @@ static bool validate_rect(struct b2r2_blt_rect *rect)
return true;
}
-bool b2r2_validate_user_req(struct b2r2_blt_req *req)
+bool b2r2_validate_user_req(struct b2r2_control *cont,
+ struct b2r2_blt_req *req)
{
bool is_src_img_used;
bool is_bg_img_used;
@@ -362,43 +370,43 @@ bool b2r2_validate_user_req(struct b2r2_blt_req *req)
bool is_dst_clip_rect_used;
if (req->size != sizeof(struct b2r2_blt_req)) {
- b2r2_log_err("Validation Error: "
+ b2r2_log_err(cont->dev, "Validation Error: "
"req->size != sizeof(struct b2r2_blt_req)\n");
return false;
}
is_src_img_used = !(req->flags & B2R2_BLT_FLAG_SOURCE_FILL ||
- req->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW);
+ req->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW);
is_bg_img_used = (req->flags & B2R2_BLT_FLAG_BG_BLEND);
is_src_mask_used = req->flags & B2R2_BLT_FLAG_SOURCE_MASK;
is_dst_clip_rect_used = req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP;
if (is_src_img_used || is_src_mask_used) {
- if (!validate_rect(&req->src_rect)) {
- b2r2_log_info("Validation Error: "
- "!validate_rect(&req->src_rect)\n");
+ if (!validate_rect(cont, &req->src_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->src_rect)\n");
return false;
}
}
- if (!validate_rect(&req->dst_rect)) {
- b2r2_log_info("Validation Error: "
- "!validate_rect(&req->dst_rect)\n");
+ if (!validate_rect(cont, &req->dst_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->dst_rect)\n");
return false;
}
if (is_bg_img_used) {
- if (!validate_rect(&req->bg_rect)) {
- b2r2_log_info("Validation Error: "
- "!validate_rect(&req->bg_rect)\n");
+ if (!validate_rect(cont, &req->bg_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->bg_rect)\n");
return false;
}
}
if (is_dst_clip_rect_used) {
- if (!validate_rect(&req->dst_clip_rect)) {
- b2r2_log_info("Validation Error: "
- "!validate_rect(&req->dst_clip_rect)\n");
+ if (!validate_rect(cont, &req->dst_clip_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_rect(cont, &req->dst_clip_rect)\n");
return false;
}
}
@@ -406,17 +414,17 @@ bool b2r2_validate_user_req(struct b2r2_blt_req *req)
if (is_src_img_used) {
struct b2r2_blt_rect src_img_bounding_rect;
- if (!validate_img(&req->src_img)) {
- b2r2_log_info("Validation Error: "
- "!validate_img(&req->src_img)\n");
+ if (!validate_img(cont, &req->src_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->src_img)\n");
return false;
}
b2r2_get_img_bounding_rect(&req->src_img,
&src_img_bounding_rect);
if (!b2r2_is_rect_inside_rect(&req->src_rect,
- &src_img_bounding_rect)) {
- b2r2_log_info("Validation Error: "
+ &src_img_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
"!b2r2_is_rect_inside_rect(&req->src_rect, "
"&src_img_bounding_rect)\n");
return false;
@@ -426,23 +434,23 @@ bool b2r2_validate_user_req(struct b2r2_blt_req *req)
if (is_bg_img_used) {
struct b2r2_blt_rect bg_img_bounding_rect;
- if (!validate_img(&req->bg_img)) {
- b2r2_log_info("Validation Error: "
- "!validate_img(&req->bg_img)\n");
+ if (!validate_img(cont, &req->bg_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->bg_img)\n");
return false;
}
if (!is_valid_bg_format(req->bg_img.fmt)) {
- b2r2_log_info("Validation Error: "
- "!is_valid_bg_format(req->bg_img->fmt)\n");
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!is_valid_bg_format(req->bg_img->fmt)\n");
return false;
}
b2r2_get_img_bounding_rect(&req->bg_img,
- &bg_img_bounding_rect);
+ &bg_img_bounding_rect);
if (!b2r2_is_rect_inside_rect(&req->bg_rect,
- &bg_img_bounding_rect)) {
- b2r2_log_info("Validation Error: "
+ &bg_img_bounding_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
"!b2r2_is_rect_inside_rect(&req->bg_rect, "
"&bg_img_bounding_rect)\n");
return false;
@@ -452,33 +460,32 @@ bool b2r2_validate_user_req(struct b2r2_blt_req *req)
if (is_src_mask_used) {
struct b2r2_blt_rect src_mask_bounding_rect;
- if (!validate_img(&req->src_mask)) {
- b2r2_log_info("Validation Error: "
- "!validate_img(&req->src_mask)\n");
+ if (!validate_img(cont, &req->src_mask)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->src_mask)\n");
return false;
}
b2r2_get_img_bounding_rect(&req->src_mask,
- &src_mask_bounding_rect);
+ &src_mask_bounding_rect);
if (!b2r2_is_rect_inside_rect(&req->src_rect,
&src_mask_bounding_rect)) {
- b2r2_log_info("Validation Error: "
+ b2r2_log_info(cont->dev, "Validation Error: "
"!b2r2_is_rect_inside_rect(&req->src_rect, "
- "&src_mask_bounding_rect)\n");
+ "&src_mask_bounding_rect)\n");
return false;
}
}
- if (!validate_img(&req->dst_img)) {
- b2r2_log_info("Validation Error: "
- "!validate_img(&req->dst_img)\n");
+ if (!validate_img(cont, &req->dst_img)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
+ "!validate_img(cont, &req->dst_img)\n");
return false;
}
if (is_bg_img_used) {
- if (!b2r2_is_rect_gte_rect(&req->bg_rect,
- &req->dst_rect)) {
- b2r2_log_info("Validation Error: "
+ if (!b2r2_is_rect_gte_rect(&req->bg_rect, &req->dst_rect)) {
+ b2r2_log_info(cont->dev, "Validation Error: "
"!b2r2_is_rect_gte_rect(&req->bg_rect, "
"&req->dst_rect)\n");
return false;
diff --git a/drivers/video/b2r2/b2r2_input_validation.h b/drivers/video/b2r2/b2r2_input_validation.h
index 9a736343e06..d3c6ae1b296 100644
--- a/drivers/video/b2r2/b2r2_input_validation.h
+++ b/drivers/video/b2r2/b2r2_input_validation.h
@@ -23,6 +23,9 @@
#include <video/b2r2_blt.h>
-bool b2r2_validate_user_req(struct b2r2_blt_req *req);
+#include "b2r2_internal.h"
+
+bool b2r2_validate_user_req(struct b2r2_control *cont,
+ struct b2r2_blt_req *req);
#endif
diff --git a/drivers/video/b2r2/b2r2_internal.h b/drivers/video/b2r2/b2r2_internal.h
index 718d1ad40c9..7a46bbda19e 100644
--- a/drivers/video/b2r2/b2r2_internal.h
+++ b/drivers/video/b2r2/b2r2_internal.h
@@ -13,14 +13,18 @@
#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
#define _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_
-
+#include <linux/device.h>
+#include <linux/miscdevice.h>
#include <video/b2r2_blt.h>
-#include "b2r2_core.h"
#include "b2r2_global.h"
-
#include "b2r2_hw.h"
+/**
+ * B2R2_MAX_NBR_DEVICES - The maximum number of B2R2s handled
+ */
+#define B2R2_MAX_NBR_DEVICES 1
+
/* The maximum possible number of temporary buffers needed */
#define MAX_TMP_BUFS_NEEDED 2
@@ -28,12 +32,99 @@
#define CLUT_SIZE 1024
/**
- * b2r2_blt_device() - Returns the device associated with B2R2 BLT.
- * Mainly for debugging with dev_... functions.
+ * b2r2_op_type - the type of B2R2 operation to configure
+ */
+enum b2r2_op_type {
+ B2R2_DIRECT_COPY,
+ B2R2_DIRECT_FILL,
+ B2R2_COPY,
+ B2R2_FILL,
+ B2R2_SCALE,
+ B2R2_ROTATE,
+ B2R2_SCALE_AND_ROTATE,
+ B2R2_FLIP,
+};
+
+/**
+ * b2r2_fmt_type - the type of buffer for a given format
+ */
+enum b2r2_fmt_type {
+ B2R2_FMT_TYPE_RASTER,
+ B2R2_FMT_TYPE_SEMI_PLANAR,
+ B2R2_FMT_TYPE_PLANAR,
+};
+
+/**
+ * b2r2_fmt_conv - the type of format conversion to do
+ */
+enum b2r2_fmt_conv {
+ B2R2_FMT_CONV_NONE,
+ B2R2_FMT_CONV_RGB_TO_YUV,
+ B2R2_FMT_CONV_YUV_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_YUV,
+ B2R2_FMT_CONV_RGB_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_RGB,
+ B2R2_FMT_CONV_YUV_TO_BGR,
+ B2R2_FMT_CONV_BGR_TO_YUV,
+};
+
+/**
+ * enum b2r2_core_queue - Indicates the B2R2 queue that the job belongs to
+ *
+ * @B2R2_CORE_QUEUE_AQ1: Application queue 1
+ * @B2R2_CORE_QUEUE_AQ2: Application queue 2
+ * @B2R2_CORE_QUEUE_AQ3: Application queue 3
+ * @B2R2_CORE_QUEUE_AQ4: Application queue 4
+ * @B2R2_CORE_QUEUE_CQ1: Composition queue 1
+ * @B2R2_CORE_QUEUE_CQ2: Composition queue 2
+ * @B2R2_CORE_QUEUE_NO_OF: Number of queues
+ */
+enum b2r2_core_queue {
+ B2R2_CORE_QUEUE_AQ1 = 0,
+ B2R2_CORE_QUEUE_AQ2,
+ B2R2_CORE_QUEUE_AQ3,
+ B2R2_CORE_QUEUE_AQ4,
+ B2R2_CORE_QUEUE_CQ1,
+ B2R2_CORE_QUEUE_CQ2,
+ B2R2_CORE_QUEUE_NO_OF,
+};
+
+#define B2R2_NUM_APPLICATIONS_QUEUES 4
+
+/**
+ * enum b2r2_core_job_state - Indicates the current state of the job
*
- * Returns the device pointer or NULL
+ * @B2R2_CORE_JOB_IDLE: Never queued
+ * @B2R2_CORE_JOB_QUEUED: In queue but not started yet
+ * @B2R2_CORE_JOB_RUNNING: Running, executed by B2R2
+ * @B2R2_CORE_JOB_DONE: Completed
+ * @B2R2_CORE_JOB_CANCELED: Canceled
*/
-struct device *b2r2_blt_device(void);
+enum b2r2_core_job_state {
+ B2R2_CORE_JOB_IDLE = 0,
+ B2R2_CORE_JOB_QUEUED,
+ B2R2_CORE_JOB_RUNNING,
+ B2R2_CORE_JOB_DONE,
+ B2R2_CORE_JOB_CANCELED,
+};
+
+/**
+ * b2r2_work_buf - specification for a temporary work buffer
+ *
+ * @size - the size of the buffer (set by b2r2_node_split)
+ * @phys_addr - the physical address of the buffer (set by b2r2_blt_main)
+ */
+struct b2r2_work_buf {
+ u32 size;
+ u32 phys_addr;
+ void *virt_addr;
+ u32 mem_handle;
+};
+
+struct tmp_buf {
+ struct b2r2_work_buf buf;
+ bool in_use;
+};
/**
* struct b2r2_blt_instance - Represents the B2R2 instance (one per open)
@@ -46,6 +137,7 @@ struct device *b2r2_blt_device(void);
* in callback.
* @synching: true if any client is waiting for b2r2_blt_synch(0)
* @synch_done_waitq: Wait queue to handle synching on request_id 0
+ * @control: The b2r2 control entity
*/
struct b2r2_blt_instance {
struct mutex lock;
@@ -58,6 +150,8 @@ struct b2r2_blt_instance {
u32 no_of_active_requests;
bool synching;
wait_queue_head_t synch_done_waitq;
+
+ struct b2r2_control *control;
};
/**
@@ -111,58 +205,6 @@ struct b2r2_resolved_buf {
u32 file_len;
};
-
-/**
- * b2r2_work_buf - specification for a temporary work buffer
- *
- * @size - the size of the buffer (set by b2r2_node_split)
- * @phys_addr - the physical address of the buffer (set by b2r2_blt_main)
- */
-struct b2r2_work_buf {
- u32 size;
- u32 phys_addr;
- void *virt_addr;
- u32 mem_handle;
-};
-
-
-/**
- * b2r2_op_type - the type of B2R2 operation to configure
- */
-enum b2r2_op_type {
- B2R2_DIRECT_COPY,
- B2R2_DIRECT_FILL,
- B2R2_COPY,
- B2R2_FILL,
- B2R2_SCALE,
- B2R2_ROTATE,
- B2R2_SCALE_AND_ROTATE,
- B2R2_FLIP,
-};
-
-/**
- * b2r2_fmt_type - the type of buffer for a given format
- */
-enum b2r2_fmt_type {
- B2R2_FMT_TYPE_RASTER,
- B2R2_FMT_TYPE_SEMI_PLANAR,
- B2R2_FMT_TYPE_PLANAR,
-};
-
-/**
- * b2r2_fmt_conv - the type of format conversion to do
- */
-enum b2r2_fmt_conv {
- B2R2_FMT_CONV_NONE,
- B2R2_FMT_CONV_RGB_TO_YUV,
- B2R2_FMT_CONV_YUV_TO_RGB,
- B2R2_FMT_CONV_YUV_TO_YUV,
- B2R2_FMT_CONV_RGB_TO_BGR,
- B2R2_FMT_CONV_BGR_TO_RGB,
- B2R2_FMT_CONV_YUV_TO_BGR,
- B2R2_FMT_CONV_BGR_TO_YUV,
-};
-
/**
* b2r2_node_split_buf - information about a source or destination buffer
*
@@ -281,6 +323,89 @@ struct b2r2_node_split_job {
};
/**
+ * struct b2r2_core_job - Represents a B2R2 core job
+ *
+ * @start_sentinel: Memory overwrite guard
+ *
+ * @tag: Client value. Used by b2r2_core_job_find_first_with_tag().
+ * @prio: Job priority, from -19 up to 20. Mapped to the
+ * B2R2 application queues. Filled in by the client.
+ * @first_node_address: Physical address of the first node. Filled
+ * in by the client.
+ * @last_node_address: Physical address of the last node. Filled
+ * in by the client.
+ *
+ * @callback: Function that will be called when the job is done.
+ * @acquire_resources: Function that allocates the resources needed
+ * to execute the job (i.e. SRAM alloc). Must not
+ * sleep if atomic, should fail with negative error code
+ * if resources not available.
+ * @release_resources: Function that releases the resources previously
+ * allocated by acquire_resources (i.e. SRAM alloc).
+ * @release: Function that will be called when the reference count reaches
+ * zero.
+ *
+ * @job_id: Unique id for this job, assigned by B2R2 core
+ * @job_state: The current state of the job
+ * @jiffies: Number of jiffies needed for this request
+ *
+ * @list: List entry element for internal list management
+ * @event: Wait queue event to wait for job done
+ * @work: Work queue structure, for callback implementation
+ *
+ * @queue: The queue that this job shall be submitted to
+ * @control: B2R2 Queue control
+ * @pace_control: For composition queue only
+ * @interrupt_context: Context for interrupt
+ * @hw_start_time: The point when the b2r2 HW queue is activated for this job
+ * @nsec_active_in_hw: Time spent on the b2r2 HW queue for this job
+ *
+ * @end_sentinel: Memory overwrite guard
+ */
+struct b2r2_core_job {
+ u32 start_sentinel;
+
+ /* Data to be filled in by client */
+ int tag;
+ int prio;
+ u32 first_node_address;
+ u32 last_node_address;
+ void (*callback)(struct b2r2_core_job *);
+ int (*acquire_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release_resources)(struct b2r2_core_job *,
+ bool atomic);
+ void (*release)(struct b2r2_core_job *);
+
+ /* Output data, do not modify */
+ int job_id;
+ enum b2r2_core_job_state job_state;
+ unsigned long jiffies;
+
+ /* Data below is internal to b2r2_core, do not modify */
+
+ /* Reference counting */
+ u32 ref_count;
+
+ /* Internal data */
+ struct list_head list;
+ wait_queue_head_t event;
+ struct work_struct work;
+
+ /* B2R2 HW data */
+ enum b2r2_core_queue queue;
+ u32 control;
+ u32 pace_control;
+ u32 interrupt_context;
+
+ /* Timing data */
+ u32 hw_start_time;
+ s32 nsec_active_in_hw;
+
+ u32 end_sentinel;
+};
+
+/**
* struct b2r2_blt_request - Represents one B2R2 blit request
*
* @instance: Back pointer to the instance structure
@@ -330,6 +455,105 @@ struct b2r2_blt_request {
s32 total_time_nsec;
};
+/**
+ * struct b2r2_mem_heap - The memory heap
+ *
+ * @start_phys_addr: Physical memory start address
+ * @start_virt_ptr: Virtual pointer to start
+ * @size: Memory size
+ * @align: Alignment
+ * @blocks: List of all blocks
+ * @heap_lock: Protection for the heap
+ * @node_size: Size of each B2R2 node
+ * @node_heap: Heap for B2R2 node allocations
+ * @debugfs_root_dir: Debugfs B2R2 mem root dir
+ * @debugfs_heap_stats: Debugfs B2R2 memory status
+ * @debugfs_dir_blocks: Debugfs B2R2 free blocks dir
+ */
+struct b2r2_mem_heap {
+ dma_addr_t start_phys_addr;
+ void *start_virt_ptr;
+ u32 size;
+ u32 align;
+ struct list_head blocks;
+ spinlock_t heap_lock;
+ u32 node_size;
+ struct dma_pool *node_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_heap_stats;
+ struct dentry *debugfs_dir_blocks;
+#endif
+};
+
+/**
+ *
+ * @miscdev: The miscdev presenting b2r2 to the system
+ * @dev: The device handle of the b2r2 instance
+ * @id: The id of the b2r2 instance
+ * @name: The name of the b2r2 instance
+ * @data: Used to store a reference to b2r2_core
+ * @tmp_bufs: Temporary buffers needed in the node splitter
+ * @filters_initialized: Indicating of filters has been
+ * initialized for this b2r2 instance
+ * @mem_heap: The b2r2 heap, e.g. used to allocate nodes
+ * @debugfs_latest_request: Copy of the latest request issued
+ * @debugfs_root_dir: The debugfs root directory, e.g. /debugfs/b2r2
+ * @debugfs_debug_root_dir: The b2r2 debug root directory,
+ * e.g. /debugfs/b2r2/debug
+ * @stat_lock: Spin lock protecting the statistics
+ * @stat_n_jobs_added: Number of jobs added to b2r2_core
+ * @stat_n_jobs_released: Number of jobs released (job_release called)
+ * @stat_n_jobs_in_report_list: Number of jobs currently in the report list
+ * @stat_n_in_blt: Number of client threads currently exec inside b2r2_blt()
+ * @stat_n_in_blt_synch: Number of client threads currently waiting for synch
+ * @stat_n_in_blt_add: Number of client threads currenlty adding in b2r2_blt
+ * @stat_n_in_blt_wait: Number of client threads currently waiting in b2r2_blt
+ * @stat_n_in_synch_0: Number of client threads currently in b2r2_blt_sync
+ * waiting for all client jobs to finish
+ * @stat_n_in_synch_job: Number of client threads currently in b2r2_blt_sync
+ * waiting specific job to finish
+ * @stat_n_in_query_cap: Number of clients currently in query cap
+ * @stat_n_in_open: Number of clients currently in b2r2_blt_open
+ * @stat_n_in_release: Number of clients currently in b2r2_blt_release
+ * @last_job_lock: Mutex protecting last_job
+ * @last_job: The last running job on this b2r2 instance
+ * @last_job_chars: Temporary buffer used in printing last_job
+ * @prev_node_count: Node cound of last_job
+ */
+struct b2r2_control {
+ struct miscdevice miscdev;
+ struct device *dev;
+ int id;
+ char name[16];
+ void *data;
+ struct tmp_buf tmp_bufs[MAX_TMP_BUFS_NEEDED];
+ int filters_initialized;
+ struct b2r2_mem_heap mem_heap;
+#ifdef CONFIG_DEBUG_FS
+ struct b2r2_blt_request debugfs_latest_request;
+ struct dentry *debugfs_root_dir;
+ struct dentry *debugfs_debug_root_dir;
+#endif
+ struct mutex stat_lock;
+ unsigned long stat_n_jobs_added;
+ unsigned long stat_n_jobs_released;
+ unsigned long stat_n_jobs_in_report_list;
+ unsigned long stat_n_in_blt;
+ unsigned long stat_n_in_blt_synch;
+ unsigned long stat_n_in_blt_add;
+ unsigned long stat_n_in_blt_wait;
+ unsigned long stat_n_in_synch_0;
+ unsigned long stat_n_in_synch_job;
+ unsigned long stat_n_in_query_cap;
+ unsigned long stat_n_in_open;
+ unsigned long stat_n_in_release;
+ struct mutex last_job_lock;
+ struct b2r2_node *last_job;
+ char *last_job_chars;
+ int prev_node_count;
+};
+
/* FIXME: The functions below should be removed when we are
switching to the new Robert Lind allocator */
@@ -341,7 +565,8 @@ struct b2r2_blt_request {
* Return:
* Returns a pointer to the first node in the node list.
*/
-struct b2r2_node *b2r2_blt_alloc_nodes(int node_count);
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int node_count);
/**
* b2r2_blt_free_nodes() - Release nodes previously allocated via
@@ -349,16 +574,17 @@ struct b2r2_node *b2r2_blt_alloc_nodes(int node_count);
*
* @first_node: First node in linked list of nodes
*/
-void b2r2_blt_free_nodes(struct b2r2_node *first_node);
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node);
/**
* b2r2_blt_module_init() - Initialize the B2R2 blt module
*/
-int b2r2_blt_module_init(void);
+int b2r2_blt_module_init(struct b2r2_control *cont);
/**
* b2r2_blt_module_exit() - Un-initialize the B2R2 blt module
*/
-void b2r2_blt_module_exit(void);
+void b2r2_blt_module_exit(struct b2r2_control *cont);
#endif
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.c b/drivers/video/b2r2/b2r2_mem_alloc.c
index 0dcd9b6a55e..e5235d2c97f 100644
--- a/drivers/video/b2r2/b2r2_mem_alloc.c
+++ b/drivers/video/b2r2/b2r2_mem_alloc.c
@@ -24,56 +24,12 @@
#include "b2r2_internal.h"
#include "b2r2_mem_alloc.h"
-
-/* Represents one block of b2r2 physical memory, free or allocated */
-struct b2r2_mem_block {
- struct list_head list; /* For membership in list */
-
- u32 offset; /* Offset in b2r2 physical memory area (aligned) */
- u32 size; /* Size of the object (requested size if busy,
- else actual) */
- bool free; /* True if the block is free */
-
- u32 lock_count; /* Lock count */
-
-#ifdef CONFIG_DEBUG_FS
- char debugfs_fname[80]; /* debugfs file name */
- struct dentry *debugfs_block; /* debugfs dir entry for the block */
-#endif
-};
-
-/* The memory heap */
-struct b2r2_mem_heap {
- struct device *dev; /* Device pointer for memory allocation */
- dma_addr_t start_phys_addr;/* Physical memory start address */
- void *start_virt_ptr; /* Virtual pointer to start */
- u32 size; /* Memory size */
- u32 align; /* Alignment */
-
- struct list_head blocks; /* List of all blocks */
-
- spinlock_t heap_lock; /* Protection for the heap */
-
- u32 node_size; /* Size of each B2R2 node */
- struct dma_pool *node_heap;/* Heap for B2R2 node allocations */
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_sub_root_dir; /* debugfs: B2R2 MEM root dir */
- struct dentry *debugfs_heap_stats; /* debugfs: B2R2 memory status */
- struct dentry *debugfs_dir_blocks; /* debugfs: Free blocks dir */
-#endif
-};
-
-static struct b2r2_mem_heap *mem_heap;
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *debugfs_root_dir; /* debugfs: B2R2 MEM root dir */
-#endif
-
/* Forward declarations */
static struct b2r2_mem_block *b2r2_mem_block_alloc(
- u32 offset, u32 size, bool free);
-static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block);
+ struct b2r2_control *cont, u32 offset, u32 size, bool free);
+static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block);
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mem_heap,
+ struct b2r2_mem_heap_status *mem_heap_status);
/* Align value down to specified alignment */
static inline u32 align_down(u32 align, u32 value)
@@ -95,10 +51,10 @@ static inline u32 align_up(u32 align, u32 value)
* Mount like this:
* mkdir /debug
* mount -t debugfs none /debug
- * ls /debug/b2r2_mem
+ * ls /debug/b2r2/mem
*
- * ls -al /debug/b2r2_mem/blocks
- * cat /debug/b2r2_mem/stats
+ * ls -al /debug/b2r2/mem/blocks
+ * cat /debug/b2r2/mem/stats
*/
@@ -107,41 +63,41 @@ static char *get_b2r2_mem_stats(struct b2r2_mem_heap *mem_heap, char *buf)
{
struct b2r2_mem_heap_status mem_heap_status;
- if (b2r2_mem_heap_status(&mem_heap_status) != 0) {
+ if (b2r2_mem_heap_status(mem_heap, &mem_heap_status) != 0) {
strcpy(buf, "Error, failed to get status\n");
return buf;
}
sprintf(buf,
- "Handle : 0x%lX\n"
- "Physical start address : 0x%lX\n"
- "Size : %lu\n"
- "Align : %lu\n"
- "No of blocks allocated : %lu\n"
- "Allocated size : %lu\n"
- "No of free blocks : %lu\n"
- "Free size : %lu\n"
- "No of locks : %lu\n"
- "No of locked : %lu\n"
- "No of nodes : %lu\n",
- (unsigned long) mem_heap,
- (unsigned long) mem_heap_status.start_phys_addr,
- (unsigned long) mem_heap_status.size,
- (unsigned long) mem_heap_status.align,
- (unsigned long) mem_heap_status.num_alloc,
- (unsigned long) mem_heap_status.allocated_size,
- (unsigned long) mem_heap_status.num_free,
- (unsigned long) mem_heap_status.free_size,
- (unsigned long) mem_heap_status.num_locks,
- (unsigned long) mem_heap_status.num_locked,
- (unsigned long) mem_heap_status.num_nodes);
+ "Handle : 0x%lX\n"
+ "Physical start address : 0x%lX\n"
+ "Size : %lu\n"
+ "Align : %lu\n"
+ "No of blocks allocated : %lu\n"
+ "Allocated size : %lu\n"
+ "No of free blocks : %lu\n"
+ "Free size : %lu\n"
+ "No of locks : %lu\n"
+ "No of locked : %lu\n"
+ "No of nodes : %lu\n",
+ (unsigned long) mem_heap,
+ (unsigned long) mem_heap_status.start_phys_addr,
+ (unsigned long) mem_heap_status.size,
+ (unsigned long) mem_heap_status.align,
+ (unsigned long) mem_heap_status.num_alloc,
+ (unsigned long) mem_heap_status.allocated_size,
+ (unsigned long) mem_heap_status.num_free,
+ (unsigned long) mem_heap_status.free_size,
+ (unsigned long) mem_heap_status.num_locks,
+ (unsigned long) mem_heap_status.num_locked,
+ (unsigned long) mem_heap_status.num_nodes);
return buf;
}
/*
* Print memory heap status on file
- * (Use like "cat /debug/b2r2_mem/igram/stats")
+ * (Use like "cat /debug/b2r2/mem/stats")
*/
static int debugfs_b2r2_mem_stats_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
@@ -186,11 +142,11 @@ static int debugfs_b2r2_mem_block_read(struct file *filp, char __user *buf,
int ret = 0;
dev_size = sprintf(Buf, "offset: %08lX %s size: %8d "
- "lock_count: %2d\n",
- (unsigned long) mem_block->offset,
- mem_block->free ? "free" : "allc",
- mem_block->size,
- mem_block->lock_count);
+ "lock_count: %2d\n",
+ (unsigned long) mem_block->offset,
+ mem_block->free ? "free" : "allc",
+ mem_block->size,
+ mem_block->lock_count);
/* No more to read if offset != 0 */
if (*f_pos > dev_size)
@@ -235,13 +191,13 @@ void debugfs_create_mem_block_entry(struct b2r2_mem_block *mem_block,
/* Add the block in debugfs */
if (mem_block->free)
sprintf(mem_block->debugfs_fname, "%08lX free",
- (unsigned long) mem_block->offset);
+ (unsigned long) mem_block->offset);
else {
sprintf(mem_block->debugfs_fname, "%08lX allc h:%08lX "
- "lck:%d ",
- (unsigned long) mem_block->offset,
- (unsigned long) mem_block,
- mem_block->lock_count);
+ "lck:%d ",
+ (unsigned long) mem_block->offset,
+ (unsigned long) mem_block,
+ mem_block->lock_count);
}
mem_block->debugfs_block = debugfs_create_file(
@@ -258,75 +214,66 @@ void debugfs_create_mem_block_entry(struct b2r2_mem_block *mem_block,
#endif /* CONFIG_DEBUG_FS */
/* Module initialization function */
-int b2r2_mem_init(struct device *dev, u32 heap_size, u32 align, u32 node_size)
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size)
{
struct b2r2_mem_block *mem_block;
u32 aligned_size;
- printk(KERN_INFO "B2R2_MEM: Creating heap for size %d bytes\n",
- (int) heap_size);
+ dev_info(cont->dev, "%s: Creating heap for size %d bytes\n",
+ __func__, (int) heap_size);
/* Align size */
aligned_size = align_down(align, heap_size);
if (aligned_size == 0)
return -EINVAL;
- mem_heap = kcalloc(sizeof(struct b2r2_mem_heap), 1, GFP_KERNEL);
- if (!mem_heap)
- return -ENOMEM;
-
- mem_heap->start_virt_ptr = dma_alloc_coherent(dev,
- aligned_size, &(mem_heap->start_phys_addr), GFP_KERNEL);
- if (!mem_heap->start_phys_addr || !mem_heap->start_virt_ptr) {
+ cont->mem_heap.start_virt_ptr = dma_alloc_coherent(cont->dev,
+ aligned_size, &(cont->mem_heap.start_phys_addr), GFP_KERNEL);
+ if (!cont->mem_heap.start_phys_addr || !cont->mem_heap.start_virt_ptr) {
printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
return -ENOMEM;
}
/* Initialize the heap */
- mem_heap->dev = dev;
- mem_heap->size = aligned_size;
- mem_heap->align = align;
+ cont->mem_heap.size = aligned_size;
+ cont->mem_heap.align = align;
- INIT_LIST_HEAD(&mem_heap->blocks);
+ INIT_LIST_HEAD(&cont->mem_heap.blocks);
#ifdef CONFIG_DEBUG_FS
/* Register debugfs */
-
- debugfs_root_dir = debugfs_create_dir("b2r2_mem", NULL);
-
- mem_heap->debugfs_sub_root_dir = debugfs_create_dir("b2r2_mem",
- debugfs_root_dir);
- mem_heap->debugfs_heap_stats = debugfs_create_file(
- "stats", 0444, mem_heap->debugfs_sub_root_dir, mem_heap,
- &debugfs_b2r2_mem_stats_fops);
- mem_heap->debugfs_dir_blocks = debugfs_create_dir(
- "blocks", mem_heap->debugfs_sub_root_dir);
+ if (cont->mem_heap.debugfs_root_dir) {
+ cont->mem_heap.debugfs_heap_stats = debugfs_create_file(
+ "stats", 0444, cont->mem_heap.debugfs_root_dir,
+ &cont->mem_heap, &debugfs_b2r2_mem_stats_fops);
+ cont->mem_heap.debugfs_dir_blocks = debugfs_create_dir(
+ "blocks", cont->mem_heap.debugfs_root_dir);
+ }
#endif
/* Create the first _free_ memory block */
- mem_block = b2r2_mem_block_alloc(0, aligned_size, true);
+ mem_block = b2r2_mem_block_alloc(cont, 0, aligned_size, true);
if (!mem_block) {
- dma_free_coherent(dev, aligned_size,
- mem_heap->start_virt_ptr,
- mem_heap->start_phys_addr);
- kfree(mem_heap);
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
return -ENOMEM;
}
/* Add the free block to the blocks list */
- list_add(&mem_block->list, &mem_heap->blocks);
+ list_add(&mem_block->list, &cont->mem_heap.blocks);
/* Allocate separate heap for B2R2 nodes */
- mem_heap->node_size = node_size;
- mem_heap->node_heap = dma_pool_create("b2r2_node_cache",
- dev, node_size, align, 4096);
- if (!mem_heap->node_heap) {
+ cont->mem_heap.node_size = node_size;
+ cont->mem_heap.node_heap = dma_pool_create("b2r2_node_cache",
+ cont->dev, node_size, align, 4096);
+ if (!cont->mem_heap.node_heap) {
b2r2_mem_block_free(mem_block);
- dma_free_coherent(dev, aligned_size,
- mem_heap->start_virt_ptr,
- mem_heap->start_phys_addr);
- kfree(mem_heap);
+ dma_free_coherent(cont->dev, aligned_size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n");
return -ENOMEM;
}
@@ -336,53 +283,45 @@ int b2r2_mem_init(struct device *dev, u32 heap_size, u32 align, u32 node_size)
EXPORT_SYMBOL(b2r2_mem_init);
/* Module exit function */
-void b2r2_mem_exit(void)
+void b2r2_mem_exit(struct b2r2_control *cont)
{
struct list_head *ptr;
/* Free B2R2 node heap */
- dma_pool_destroy(mem_heap->node_heap);
-
-#ifdef CONFIG_DEBUG_FS
- /* debugfs root dir */
- if (debugfs_root_dir) {
- debugfs_remove_recursive(debugfs_root_dir);
- debugfs_root_dir = NULL;
- }
-#endif
+ dma_pool_destroy(cont->mem_heap.node_heap);
- list_for_each(ptr, &mem_heap->blocks) {
+ list_for_each(ptr, &cont->mem_heap.blocks) {
struct b2r2_mem_block *mem_block =
list_entry(ptr, struct b2r2_mem_block, list);
b2r2_mem_block_free(mem_block);
}
- dma_free_coherent(mem_heap->dev, mem_heap->size,
- mem_heap->start_virt_ptr,
- mem_heap->start_phys_addr);
- kfree(mem_heap);
+ dma_free_coherent(cont->dev, cont->mem_heap.size,
+ cont->mem_heap.start_virt_ptr,
+ cont->mem_heap.start_phys_addr);
}
EXPORT_SYMBOL(b2r2_mem_exit);
/* Return status of the heap */
-int b2r2_mem_heap_status(struct b2r2_mem_heap_status *mem_heap_status)
+static int b2r2_mem_heap_status(struct b2r2_mem_heap *mheap,
+ struct b2r2_mem_heap_status *mem_heap_status)
{
struct list_head *ptr;
- if (!mem_heap || !mem_heap_status)
+ if (!mheap || !mem_heap_status)
return -EINVAL;
memset(mem_heap_status, 0, sizeof(*mem_heap_status));
/* Lock the heap */
- spin_lock(&mem_heap->heap_lock);
+ spin_lock(&mheap->heap_lock);
/* Fill in static info */
- mem_heap_status->start_phys_addr = mem_heap->start_phys_addr;
- mem_heap_status->size = mem_heap->size;
- mem_heap_status->align = mem_heap->align;
+ mem_heap_status->start_phys_addr = mheap->start_phys_addr;
+ mem_heap_status->size = mheap->size;
+ mem_heap_status->align = mheap->align;
- list_for_each(ptr, &mem_heap->blocks) {
+ list_for_each(ptr, &mheap->blocks) {
struct b2r2_mem_block *mem_block =
list_entry(ptr, struct b2r2_mem_block, list);
@@ -400,7 +339,7 @@ int b2r2_mem_heap_status(struct b2r2_mem_heap_status *mem_heap_status)
}
}
- spin_unlock(&mem_heap->heap_lock);
+ spin_unlock(&mheap->heap_lock);
return 0;
}
@@ -410,7 +349,7 @@ EXPORT_SYMBOL(b2r2_mem_heap_status);
* for an allocated or free memory block
*/
static struct b2r2_mem_block *b2r2_mem_block_alloc(
- u32 offset, u32 size, bool free)
+ struct b2r2_control *cont, u32 offset, u32 size, bool free)
{
struct b2r2_mem_block *mem_block = kmalloc(
sizeof(struct b2r2_mem_block), GFP_KERNEL);
@@ -427,7 +366,7 @@ static struct b2r2_mem_block *b2r2_mem_block_alloc(
mem_block->debugfs_block = NULL;
/* Add the block in debugfs */
debugfs_create_mem_block_entry(mem_block,
- mem_heap->debugfs_dir_blocks);
+ cont->mem_heap.debugfs_dir_blocks);
#endif
}
@@ -446,7 +385,8 @@ static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block)
}
/* Allocate a block from the heap */
-int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle)
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle)
{
int ret = 0;
struct list_head *ptr;
@@ -459,21 +399,19 @@ int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle)
printk(KERN_INFO "%s: size=%d\n", __func__, requested_size);
*mem_handle = 0;
- if (!mem_heap)
- return -EINVAL;
/* Lock the heap */
- spin_lock(&mem_heap->heap_lock);
+ spin_lock(&cont->mem_heap.heap_lock);
- aligned_size = align_up(mem_heap->align, requested_size);
+ aligned_size = align_up(cont->mem_heap.align, requested_size);
/* Try to find the best matching free block of suitable size */
- list_for_each(ptr, &mem_heap->blocks) {
+ list_for_each(ptr, &cont->mem_heap.blocks) {
struct b2r2_mem_block *mem_block =
list_entry(ptr, struct b2r2_mem_block, list);
if (mem_block->free && mem_block->size >= aligned_size &&
- (!found_mem_block ||
- mem_block->size < found_mem_block->size)) {
+ (!found_mem_block ||
+ mem_block->size < found_mem_block->size)) {
found_mem_block = mem_block;
if (found_mem_block->size == aligned_size)
break;
@@ -482,7 +420,8 @@ int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle)
if (found_mem_block) {
struct b2r2_mem_block *new_block
- = b2r2_mem_block_alloc(found_mem_block->offset,
+ = b2r2_mem_block_alloc(cont,
+ found_mem_block->offset,
requested_size, false);
if (new_block) {
@@ -500,7 +439,7 @@ int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle)
#ifdef CONFIG_DEBUG_FS
debugfs_create_mem_block_entry(
found_mem_block,
- mem_heap->debugfs_dir_blocks);
+ cont->mem_heap.debugfs_dir_blocks);
#endif
}
@@ -518,14 +457,14 @@ int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle)
}
/* Unlock */
- spin_unlock(&mem_heap->heap_lock);
+ spin_unlock(&cont->mem_heap.heap_lock);
return ret;
}
EXPORT_SYMBOL(b2r2_mem_alloc);
/* Free the allocated block */
-int b2r2_mem_free(u32 mem_handle)
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle)
{
int ret = 0;
struct b2r2_mem_block *mem_block = (struct b2r2_mem_block *) mem_handle;
@@ -534,7 +473,7 @@ int b2r2_mem_free(u32 mem_handle)
return -EINVAL;
/* Lock the heap */
- spin_lock(&mem_heap->heap_lock);
+ spin_lock(&cont->mem_heap.heap_lock);
if (!ret && mem_block->free)
ret = -EINVAL;
@@ -544,18 +483,18 @@ int b2r2_mem_free(u32 mem_handle)
/* Release the block */
mem_block->free = true;
- mem_block->size = align_up(mem_heap->align,
+ mem_block->size = align_up(cont->mem_heap.align,
mem_block->size);
/* Join with previous block if possible */
- if (mem_block->list.prev != &mem_heap->blocks) {
+ if (mem_block->list.prev != &cont->mem_heap.blocks) {
struct b2r2_mem_block *prev_block =
list_entry(mem_block->list.prev,
struct b2r2_mem_block, list);
if (prev_block->free &&
- (prev_block->offset + prev_block->size) ==
- mem_block->offset) {
+ (prev_block->offset + prev_block->size) ==
+ mem_block->offset) {
mem_block->offset = prev_block->offset;
mem_block->size += prev_block->size;
@@ -564,15 +503,15 @@ int b2r2_mem_free(u32 mem_handle)
}
/* Join with next block if possible */
- if (mem_block->list.next != &mem_heap->blocks) {
+ if (mem_block->list.next != &cont->mem_heap.blocks) {
struct b2r2_mem_block *next_block
= list_entry(mem_block->list.next,
- struct b2r2_mem_block,
- list);
+ struct b2r2_mem_block,
+ list);
if (next_block->free &&
- (mem_block->offset + mem_block->size) ==
- next_block->offset) {
+ (mem_block->offset + mem_block->size) ==
+ next_block->offset) {
mem_block->size += next_block->size;
b2r2_mem_block_free(next_block);
@@ -580,19 +519,20 @@ int b2r2_mem_free(u32 mem_handle)
}
#ifdef CONFIG_DEBUG_FS
debugfs_create_mem_block_entry(mem_block,
- mem_heap->debugfs_dir_blocks);
+ cont->mem_heap.debugfs_dir_blocks);
#endif
}
/* Unlock */
- spin_unlock(&mem_heap->heap_lock);
+ spin_unlock(&cont->mem_heap.heap_lock);
return ret;
}
EXPORT_SYMBOL(b2r2_mem_free);
/* Lock the allocated block in memory */
-int b2r2_mem_lock(u32 mem_handle, u32 *phys_addr, void **virt_ptr, u32 *size)
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size)
{
struct b2r2_mem_block *mem_block =
(struct b2r2_mem_block *) mem_handle;
@@ -601,30 +541,30 @@ int b2r2_mem_lock(u32 mem_handle, u32 *phys_addr, void **virt_ptr, u32 *size)
return -EINVAL;
/* Lock the heap */
- spin_lock(&mem_heap->heap_lock);
+ spin_lock(&cont->mem_heap.heap_lock);
mem_block->lock_count++;
if (phys_addr)
- *phys_addr = mem_heap->start_phys_addr + mem_block->offset;
+ *phys_addr = cont->mem_heap.start_phys_addr + mem_block->offset;
if (virt_ptr)
- *virt_ptr = (char *) mem_heap->start_virt_ptr +
+ *virt_ptr = (char *) cont->mem_heap.start_virt_ptr +
mem_block->offset;
if (size)
- *size = align_up(mem_heap->align, mem_block->size);
+ *size = align_up(cont->mem_heap.align, mem_block->size);
#ifdef CONFIG_DEBUG_FS
debugfs_create_mem_block_entry(mem_block,
- mem_heap->debugfs_dir_blocks);
+ cont->mem_heap.debugfs_dir_blocks);
#endif
- spin_unlock(&mem_heap->heap_lock);
+ spin_unlock(&cont->mem_heap.heap_lock);
return 0;
}
EXPORT_SYMBOL(b2r2_mem_lock);
/* Unlock the allocated block in memory */
-int b2r2_mem_unlock(u32 mem_handle)
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle)
{
struct b2r2_mem_block *mem_block =
(struct b2r2_mem_block *) mem_handle;
@@ -633,11 +573,11 @@ int b2r2_mem_unlock(u32 mem_handle)
return -EINVAL;
/* Lock the heap */
- spin_lock(&mem_heap->heap_lock);
+ spin_lock(&cont->mem_heap.heap_lock);
mem_block->lock_count--;
- spin_unlock(&mem_heap->heap_lock);
+ spin_unlock(&cont->mem_heap.heap_lock);
/* debugfs will be updated in release */
return 0;
@@ -646,7 +586,8 @@ int b2r2_mem_unlock(u32 mem_handle)
EXPORT_SYMBOL(b2r2_mem_unlock);
/* Allocate one or more b2r2 nodes from DMA pool */
-int b2r2_node_alloc(u32 num_nodes, struct b2r2_node **first_node)
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node)
{
int i;
int ret = 0;
@@ -656,7 +597,7 @@ int b2r2_node_alloc(u32 num_nodes, struct b2r2_node **first_node)
/* Check input parameters */
if ((num_nodes <= 0) || !first_node) {
- printk(KERN_ERR
+ dev_err(cont->dev,
"B2R2_MEM: Invalid parameter for b2r2_node_alloc, "
"num_nodes=%d, first_node=%ld\n",
(int) num_nodes, (long) first_node);
@@ -664,48 +605,49 @@ int b2r2_node_alloc(u32 num_nodes, struct b2r2_node **first_node)
}
/* Allocate the first node */
- first_node_ptr = dma_pool_alloc(mem_heap->node_heap,
+ first_node_ptr = dma_pool_alloc(cont->mem_heap.node_heap,
GFP_DMA | GFP_KERNEL, &physical_address);
- if (first_node_ptr) {
- /* Initialize first node */
- first_node_ptr->next = NULL;
- first_node_ptr->physical_address = physical_address +
+ if (!first_node_ptr) {
+ dev_err(cont->dev,
+ "B2R2_MEM: Failed to allocate memory for node\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize first node */
+ first_node_ptr->next = NULL;
+ first_node_ptr->physical_address = physical_address +
offsetof(struct b2r2_node, node);
- /* Allocate and initialize remaining nodes, */
- /* and link them into a list */
- for (i = 1, node_ptr = first_node_ptr; i < num_nodes; i++) {
- node_ptr->next = dma_pool_alloc(mem_heap->node_heap,
+ /* Allocate and initialize remaining nodes, */
+ /* and link them into a list */
+ for (i = 1, node_ptr = first_node_ptr; i < num_nodes; i++) {
+ node_ptr->next = dma_pool_alloc(cont->mem_heap.node_heap,
GFP_DMA | GFP_KERNEL, &physical_address);
- if (node_ptr->next) {
- node_ptr = node_ptr->next;
- node_ptr->next = NULL;
- node_ptr->physical_address = physical_address +
+ if (node_ptr->next) {
+ node_ptr = node_ptr->next;
+ node_ptr->next = NULL;
+ node_ptr->physical_address = physical_address +
offsetof(struct b2r2_node, node);
- } else {
- printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n");
- ret = -ENOMEM;
- break;
- }
+ } else {
+ printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n");
+ ret = -ENOMEM;
+ break;
}
-
- /* If all nodes were allocated successfully, */
- /* return the first node */
- if (!ret)
- *first_node = first_node_ptr;
- else
- b2r2_node_free(first_node_ptr);
- } else {
- printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n");
- ret = -ENOMEM;
}
+ /* If all nodes were allocated successfully, */
+ /* return the first node */
+ if (!ret)
+ *first_node = first_node_ptr;
+ else
+ b2r2_node_free(cont, first_node_ptr);
+
return ret;
}
EXPORT_SYMBOL(b2r2_node_alloc);
/* Free a linked list of b2r2 nodes */
-void b2r2_node_free(struct b2r2_node *first_node)
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node)
{
struct b2r2_node *current_node = first_node;
struct b2r2_node *next_node = NULL;
@@ -713,9 +655,9 @@ void b2r2_node_free(struct b2r2_node *first_node)
/* Traverse the linked list and free the nodes */
while (current_node != NULL) {
next_node = current_node->next;
- dma_pool_free(mem_heap->node_heap, current_node,
- current_node->physical_address -
- offsetof(struct b2r2_node, node));
+ dma_pool_free(cont->mem_heap.node_heap, current_node,
+ current_node->physical_address -
+ offsetof(struct b2r2_node, node));
current_node = next_node;
}
}
diff --git a/drivers/video/b2r2/b2r2_mem_alloc.h b/drivers/video/b2r2/b2r2_mem_alloc.h
index 33309c972f5..4fd1e66abca 100644
--- a/drivers/video/b2r2/b2r2_mem_alloc.h
+++ b/drivers/video/b2r2/b2r2_mem_alloc.h
@@ -17,6 +17,7 @@
/**
* struct b2r2_mem_heap_status - Information about current state of the heap
+ *
* @start_phys_addr: Physical address of the the memory area
* @size: Size of the memory area
* @align: Alignment of start and allocation sizes (in bytes).
@@ -42,6 +43,30 @@ struct b2r2_mem_heap_status {
u32 num_nodes;
};
+/**
+ * struct b2r2_mem_block - Represents one block of b2r2
+ * physical memory, free or allocated
+ *
+ * @list: For membership in list
+ * @offset: Offset in b2r2 physical memory area (aligned)
+ * @size: Size of the object (requested size if busy, else actual)
+ * @free: True if the block is free
+ * @lock_count: Lock count
+ * @debugfs_fname: Debugfs file name
+ * @debugfs_block: Debugfs dir entry for the block
+ */
+struct b2r2_mem_block {
+ struct list_head list;
+ u32 offset;
+ u32 size;
+ bool free;
+ u32 lock_count;
+#ifdef CONFIG_DEBUG_FS
+ char debugfs_fname[80];
+ struct dentry *debugfs_block;
+#endif
+};
+
/* B2R2 memory API (kernel) */
@@ -54,21 +79,14 @@ struct b2r2_mem_heap_status {
*
* Returns 0 if success, else negative error code
**/
-int b2r2_mem_init(struct device *dev, u32 heap_size, u32 align, u32 node_size);
+int b2r2_mem_init(struct b2r2_control *cont,
+ u32 heap_size, u32 align, u32 node_size);
/**
* b2r2_mem_exit() - Cleans up the B2R2 memory manager
*
**/
-void b2r2_mem_exit(void);
-
-/**
- * b2r2_mem_heap_status() - Get information about the current heap state
- * @mem_heap_status: Struct containing status info on succesful return
- *
- * Returns 0 if success, else negative error code
- **/
-int b2r2_mem_heap_status(struct b2r2_mem_heap_status *mem_heap_status);
+void b2r2_mem_exit(struct b2r2_control *cont);
/**
* b2r2_mem_alloc() - Allocates memory block from physical memory heap
@@ -81,7 +99,8 @@ int b2r2_mem_heap_status(struct b2r2_mem_heap_status *mem_heap_status);
* All memory allocations are movable when not locked.
* Returns 0 if OK else negative error value
**/
-int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle);
+int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size,
+ u32 *returned_size, u32 *mem_handle);
/**
* b2r2_mem_free() - Frees an allocation
@@ -89,7 +108,7 @@ int b2r2_mem_alloc(u32 requested_size, u32 *returned_size, u32 *mem_handle);
*
* Returns 0 if OK else negative error value
**/
-int b2r2_mem_free(u32 mem_handle);
+int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle);
/**
* b2r2_mem_lock() - Lock memory in memory and return physical address
@@ -107,7 +126,8 @@ int b2r2_mem_free(u32 mem_handle);
* b2r2_mem_lock.
* Returns 0 if OK else negative error value
**/
-int b2r2_mem_lock(u32 mem_handle, u32 *phys_addr, void **virt_ptr, u32 *size);
+int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle,
+ u32 *phys_addr, void **virt_ptr, u32 *size);
/**
* b2r2_mem_unlock() - Unlock previously locked memory
@@ -117,7 +137,7 @@ int b2r2_mem_lock(u32 mem_handle, u32 *phys_addr, void **virt_ptr, u32 *size);
* memory area is movable again.
* Returns 0 if OK else negative error value
**/
-int b2r2_mem_unlock(u32 mem_handle);
+int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle);
/**
* b2r2_node_alloc() - Allocates B2R2 node from physical memory heap
@@ -126,7 +146,8 @@ int b2r2_mem_unlock(u32 mem_handle);
*
* Returns 0 if OK else negative error value
**/
-int b2r2_node_alloc(u32 num_nodes, struct b2r2_node **first_node);
+int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes,
+ struct b2r2_node **first_node);
/**
* b2r2_node_free() - Frees a linked list of allocated B2R2 nodes
@@ -134,7 +155,7 @@ int b2r2_node_alloc(u32 num_nodes, struct b2r2_node **first_node);
*
* Returns 0 if OK else negative error value
**/
-void b2r2_node_free(struct b2r2_node *first_node);
+void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node);
#endif /* __B2R2_MEM_ALLOC_H */
diff --git a/drivers/video/b2r2/b2r2_node_gen.c b/drivers/video/b2r2/b2r2_node_gen.c
index f452a4b11df..1f48bac6fe7 100644
--- a/drivers/video/b2r2/b2r2_node_gen.c
+++ b/drivers/video/b2r2/b2r2_node_gen.c
@@ -15,7 +15,8 @@
#include <asm/dma-mapping.h>
#include "b2r2_internal.h"
-static void free_nodes(struct b2r2_node *first_node)
+static void free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
{
struct b2r2_node *node = first_node;
int no_of_nodes = 0;
@@ -25,34 +26,35 @@ static void free_nodes(struct b2r2_node *first_node)
node = node->next;
}
- dma_free_coherent(b2r2_blt_device(),
- no_of_nodes * sizeof(struct b2r2_node),
- first_node,
- first_node->physical_address -
- offsetof(struct b2r2_node, node));
+ dma_free_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ first_node,
+ first_node->physical_address -
+ offsetof(struct b2r2_node, node));
}
-struct b2r2_node *b2r2_blt_alloc_nodes(int no_of_nodes)
+struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont,
+ int no_of_nodes)
{
u32 physical_address;
struct b2r2_node *nodes;
struct b2r2_node *tmpnode;
if (no_of_nodes <= 0) {
- dev_err(b2r2_blt_device(), "%s: Wrong number of nodes (%d)",
- __func__, no_of_nodes);
+ dev_err(cont->dev, "%s: Wrong number of nodes (%d)",
+ __func__, no_of_nodes);
return NULL;
}
/* Allocate the memory */
- nodes = (struct b2r2_node *) dma_alloc_coherent(b2r2_blt_device(),
- no_of_nodes * sizeof(struct b2r2_node),
- &physical_address, GFP_DMA | GFP_KERNEL);
+ nodes = (struct b2r2_node *) dma_alloc_coherent(cont->dev,
+ no_of_nodes * sizeof(struct b2r2_node),
+ &physical_address, GFP_DMA | GFP_KERNEL);
if (nodes == NULL) {
- dev_err(b2r2_blt_device(),
- "%s: Failed to alloc memory for nodes",
- __func__);
+ dev_err(cont->dev,
+ "%s: Failed to alloc memory for nodes",
+ __func__);
return NULL;
}
@@ -73,8 +75,9 @@ struct b2r2_node *b2r2_blt_alloc_nodes(int no_of_nodes)
return nodes;
}
-void b2r2_blt_free_nodes(struct b2r2_node *first_node)
+void b2r2_blt_free_nodes(struct b2r2_control *cont,
+ struct b2r2_node *first_node)
{
- free_nodes(first_node);
+ free_nodes(cont, first_node);
}
diff --git a/drivers/video/b2r2/b2r2_node_split.c b/drivers/video/b2r2/b2r2_node_split.c
index 22a0b9c0e5c..6587ef0c343 100644
--- a/drivers/video/b2r2/b2r2_node_split.c
+++ b/drivers/video/b2r2/b2r2_node_split.c
@@ -155,8 +155,8 @@ static const u32 vmx_yvu_to_yuv[] = {
/*
* Forward declaration of private functions
*/
-
-static int analyze_fmt_conv(struct b2r2_node_split_buf *src,
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst,
const u32 **vmx, u32 *node_count);
static int analyze_color_fill(struct b2r2_node_split_job *this,
@@ -176,56 +176,67 @@ static int analyze_transform(struct b2r2_node_split_job *this,
static int analyze_rot_scale(struct b2r2_node_split_job *this,
const struct b2r2_blt_request *req, u32 *node_count,
u32 *buf_count);
-static int analyze_scale_factors(struct b2r2_node_split_job *this);
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this);
-static void configure_src(struct b2r2_node *node,
+static void configure_src(struct b2r2_control *cont, struct b2r2_node *node,
struct b2r2_node_split_buf *src, const u32 *ivmx);
-static void configure_bg(struct b2r2_node *node,
+static void configure_bg(struct b2r2_control *cont, struct b2r2_node *node,
struct b2r2_node_split_buf *bg, bool swap_fg_bg);
-static int configure_dst(struct b2r2_node *node,
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
struct b2r2_node_split_buf *dst, const u32 *ivmx,
struct b2r2_node **next);
-static void configure_blend(struct b2r2_node *node, u32 flags,
- u32 global_alpha);
-static void configure_clip(struct b2r2_node *node,
+static void configure_blend(struct b2r2_control *cont, struct b2r2_node *node,
+ u32 flags, u32 global_alpha);
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
struct b2r2_blt_rect *clip_rect);
-static int configure_tile(struct b2r2_node_split_job *this,
- struct b2r2_node *node, struct b2r2_node **next);
-static void configure_direct_fill(struct b2r2_node *node, u32 color,
- struct b2r2_node_split_buf *dst, struct b2r2_node **next);
-static int configure_fill(struct b2r2_node *node, u32 color,
- enum b2r2_blt_fmt fmt, struct b2r2_node_split_buf *dst,
- const u32 *ivmx, struct b2r2_node **next);
-static void configure_direct_copy(struct b2r2_node *node,
- struct b2r2_node_split_buf *src,
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+static void configure_direct_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next);
+static int configure_fill(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 color, enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node **next);
+static void configure_direct_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst, struct b2r2_node **next);
-static int configure_copy(struct b2r2_node *node,
- struct b2r2_node_split_buf *src,
+static int configure_copy(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst, const u32 *ivmx,
struct b2r2_node **next,
struct b2r2_node_split_job *this);
-static int configure_rotate(struct b2r2_node *node,
- struct b2r2_node_split_buf *src,
+static int configure_rotate(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst, const u32 *ivmx,
struct b2r2_node **next,
struct b2r2_node_split_job *this);
-static int configure_scale(struct b2r2_node *node,
- struct b2r2_node_split_buf *src,
+static int configure_scale(struct b2r2_control *cont,
+ struct b2r2_node *node, struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst, u16 h_rsf, u16 v_rsf,
const u32 *ivmx, struct b2r2_node **next,
struct b2r2_node_split_job *this);
-static int configure_rot_scale(struct b2r2_node_split_job *this,
- struct b2r2_node *node, struct b2r2_node **next);
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next);
+
+static void recalculate_rects(struct b2r2_control *cont,
+ struct b2r2_blt_req *req);
-static void recalculate_rects(struct b2r2_blt_req *req);
-static int check_rect(const struct b2r2_blt_img *img,
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
const struct b2r2_blt_rect *rect,
const struct b2r2_blt_rect *clip);
-static void set_buf(struct b2r2_node_split_buf *buf, u32 addr,
- const struct b2r2_blt_img *img,
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr, const struct b2r2_blt_img *img,
const struct b2r2_blt_rect *rect, bool color_fill, u32 color);
-static int setup_tmp_buf(struct b2r2_node_split_buf *this, u32 max_size,
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *this, u32 max_size,
enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height);
static enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt);
@@ -249,7 +260,7 @@ static u32 to_RGB888(u32 color, const enum b2r2_blt_fmt fmt);
static enum b2r2_fmt_type get_fmt_type(enum b2r2_blt_fmt fmt);
static bool is_transform(const struct b2r2_blt_request *req);
-static s32 rescale(s32 dim, u16 sf);
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf);
static s32 inv_rescale(s32 dim, u16 sf);
static void set_target(struct b2r2_node *node, u32 addr,
@@ -282,8 +293,9 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
{
int ret;
bool color_fill;
+ struct b2r2_control *cont = req->instance->control;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
memset(this, 0, sizeof(*this));
@@ -331,14 +343,13 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
}
/* Unsupported formats on bg */
- if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
/*
- * There are no ivmx on source 1, so check that
- * there is no such requirement on the background
- * to destination format conversion. This check is sufficient
- * since the node splitter currently does not support
- * destination ivmx. That fact also removes
- * the source format as a parameter when checking the
+ * There are no ivmx on source 1, so check that there is no
+ * such requirement on the background to destination format
+ * conversion. This check is sufficient since the node splitter
+ * currently does not support destination ivmx. That fact also
+ * removes the source format as a parameter when checking the
* background format.
*/
if (bg_format_require_ivmx(req->user_req.bg_img.fmt,
@@ -346,63 +357,60 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
ret = -ENOSYS;
goto unsupported;
}
- }
if ((this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) &&
(is_yuv_fmt(req->user_req.src_img.fmt) ||
req->user_req.src_img.fmt == B2R2_BLT_FMT_1_BIT_A1 ||
req->user_req.src_img.fmt == B2R2_BLT_FMT_8_BIT_A8)) {
- b2r2_log_warn("%s: Unsupported: source color keying with "
- "YUV or pure alpha formats.\n",
- __func__);
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source color keying "
+ "with YUV or pure alpha formats.\n", __func__);
ret = -ENOSYS;
goto unsupported;
}
if (this->flags & (B2R2_BLT_FLAG_DEST_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_MASK)) {
- b2r2_log_warn("%s: Unsupported: source mask, "
- "destination color keying.\n",
- __func__);
+ b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, "
+ "destination color keying.\n", __func__);
ret = -ENOSYS;
goto unsupported;
}
if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) &&
req->user_req.clut == NULL) {
- b2r2_log_warn("%s: Invalid request: no table specified "
- "for CLUT color correction.\n",
+ b2r2_log_warn(cont->dev, "%s: Invalid request: no table "
+ "specified for CLUT color correction.\n",
__func__);
return -EINVAL;
}
/* Check for color fill */
color_fill = (this->flags & (B2R2_BLT_FLAG_SOURCE_FILL |
- B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
+ B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0;
/*
* B2R2 cannot handle destination clipping on buffers
* allocated close to 64MiB bank boundaries.
* recalculate src_ and dst_rect to avoid clipping.
*/
- recalculate_rects((struct b2r2_blt_req *) &req->user_req);
+ recalculate_rects(cont, (struct b2r2_blt_req *) &req->user_req);
/* Configure the source and destination buffers */
- set_buf(&this->src, req->src_resolved.physical_address,
- &req->user_req.src_img, &req->user_req.src_rect,
- color_fill, req->user_req.src_color);
+ set_buf(cont, &this->src, req->src_resolved.physical_address,
+ &req->user_req.src_img, &req->user_req.src_rect,
+ color_fill, req->user_req.src_color);
if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
- set_buf(&this->bg, req->bg_resolved.physical_address,
- &req->user_req.bg_img, &req->user_req.bg_rect,
- false, 0);
+ set_buf(cont, &this->bg, req->bg_resolved.physical_address,
+ &req->user_req.bg_img, &req->user_req.bg_rect,
+ false, 0);
}
- set_buf(&this->dst, req->dst_resolved.physical_address,
+ set_buf(cont, &this->dst, req->dst_resolved.physical_address,
&req->user_req.dst_img, &req->user_req.dst_rect, false,
0);
- b2r2_log_info("%s:\n"
+ b2r2_log_info(cont->dev, "%s:\n"
"\t\tsrc.rect=(%4d, %4d, %4d, %4d)\t"
"bg.rect=(%4d, %4d, %4d, %4d)\t"
"dst.rect=(%4d, %4d, %4d, %4d)\n", __func__, this->src.rect.x,
@@ -428,15 +436,14 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
this->blend = true;
if (this->blend && this->src.type == B2R2_FMT_TYPE_PLANAR) {
- b2r2_log_warn("%s: Unsupported: blend with planar source\n",
- __func__);
+ b2r2_log_warn(cont->dev, "%s: Unsupported: blend with planar"
+ " source\n", __func__);
ret = -ENOSYS;
goto unsupported;
}
/* Check for clipping */
- this->clip = (this->flags &
- B2R2_BLT_FLAG_DESTINATION_CLIP) != 0;
+ this->clip = (this->flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0;
if (this->clip) {
s32 l = req->user_req.dst_clip_rect.x;
s32 r = l + req->user_req.dst_clip_rect.width;
@@ -466,14 +473,14 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
}
/* Validate the destination */
- ret = check_rect(&req->user_req.dst_img, &req->user_req.dst_rect,
+ ret = check_rect(cont, &req->user_req.dst_img, &req->user_req.dst_rect,
&this->clip_rect);
if (ret < 0)
goto error;
/* Validate the source (if not color fill) */
if (!color_fill) {
- ret = check_rect(&req->user_req.src_img,
+ ret = check_rect(cont, &req->user_req.src_img,
&req->user_req.src_rect, NULL);
if (ret < 0)
goto error;
@@ -481,7 +488,7 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
/* Validate the background source */
if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
- ret = check_rect(&req->user_req.bg_img,
+ ret = check_rect(cont, &req->user_req.bg_img,
&req->user_req.bg_rect, NULL);
if (ret < 0)
goto error;
@@ -519,7 +526,7 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
if (ret == -ENOSYS) {
goto unsupported;
} else if (ret < 0) {
- b2r2_log_warn("%s: Analysis failed!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Analysis failed!\n", __func__);
goto error;
}
@@ -545,21 +552,22 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
if (this->buf_count > 0)
*bufs = &this->work_bufs[0];
- b2r2_log_info("%s: dst.win=(%d, %d, %d, %d), dst.dx=%d, dst.dy=%d\n",
- __func__, this->dst.win.x, this->dst.win.y,
- this->dst.win.width, this->dst.win.height, this->dst.dx,
- this->dst.dy);
+ b2r2_log_info(cont->dev, "%s: dst.win=(%d, %d, %d, %d), "
+ "dst.dx=%d, dst.dy=%d\n", __func__, this->dst.win.x,
+ this->dst.win.y, this->dst.win.width, this->dst.win.height,
+ this->dst.dx, this->dst.dy);
if (this->buf_count > 0)
- b2r2_log_info("%s: buf_count=%d, buf_size=%d, node_count=%d\n",
- __func__, *buf_count, bufs[0]->size, *node_count);
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, buf_size=%d, "
+ "node_count=%d\n", __func__, *buf_count,
+ bufs[0]->size, *node_count);
else
- b2r2_log_info("%s: buf_count=%d, node_count=%d\n",
+ b2r2_log_info(cont->dev, "%s: buf_count=%d, node_count=%d\n",
__func__, *buf_count, *node_count);
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
unsupported:
return ret;
}
@@ -567,8 +575,8 @@ unsupported:
/**
* b2r2_node_split_configure() - configures the node list
*/
-int b2r2_node_split_configure(struct b2r2_node_split_job *this,
- struct b2r2_node *first)
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
{
int ret;
@@ -594,13 +602,14 @@ int b2r2_node_split_configure(struct b2r2_node_split_job *this,
if (dst_w > dst->rect.width - x_pixels)
dst->win.width = dst->rect.width - x_pixels;
- ret = configure_tile(this, node, &node);
+ ret = configure_tile(cont, this, node, &node);
if (ret < 0)
goto error;
dst->win.x += dst->dx;
x_pixels += max(dst->dx, -dst->dx);
- b2r2_log_info("%s: x_pixels=%d\n", __func__, x_pixels);
+ b2r2_log_info(cont->dev, "%s: x_pixels=%d\n",
+ __func__, x_pixels);
}
dst->win.y += dst->dy;
@@ -610,22 +619,23 @@ int b2r2_node_split_configure(struct b2r2_node_split_job *this,
dst->win.width = dst_w;
x_pixels = 0;
- b2r2_log_info("%s: y_pixels=%d\n", __func__, y_pixels);
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
}
return 0;
error:
- b2r2_log_warn("%s: error!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
return ret;
}
/**
* b2r2_node_split_assign_buffers() - assigns temporary buffers to the node list
*/
-int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
- struct b2r2_node *first, struct b2r2_work_buf *bufs,
- u32 buf_count)
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first,
+ struct b2r2_work_buf *bufs, u32 buf_count)
{
struct b2r2_node *node = first;
@@ -634,8 +644,8 @@ int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
if (node->dst_tmp_index) {
BUG_ON(node->dst_tmp_index > buf_count);
- b2r2_log_info("%s: assigning buf %d as dst\n",
- __func__, node->dst_tmp_index);
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as "
+ "dst\n", __func__, node->dst_tmp_index);
node->node.GROUP1.B2R2_TBA =
bufs[node->dst_tmp_index - 1].phys_addr;
@@ -643,23 +653,23 @@ int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
if (node->src_tmp_index) {
u32 addr = bufs[node->src_tmp_index - 1].phys_addr;
- b2r2_log_info("%s: "
- "assigning buf %d as src %d ", __func__,
- node->src_tmp_index, node->src_index);
+ b2r2_log_info(cont->dev, "%s: assigning buf %d as src "
+ "%d ", __func__, node->src_tmp_index,
+ node->src_index);
BUG_ON(node->src_tmp_index > buf_count);
switch (node->src_index) {
case 1:
- b2r2_log_info("1\n");
+ b2r2_log_info(cont->dev, "1\n");
node->node.GROUP3.B2R2_SBA = addr;
break;
case 2:
- b2r2_log_info("2\n");
+ b2r2_log_info(cont->dev, "2\n");
node->node.GROUP4.B2R2_SBA = addr;
break;
case 3:
- b2r2_log_info("3\n");
+ b2r2_log_info(cont->dev, "3\n");
node->node.GROUP5.B2R2_SBA = addr;
break;
default:
@@ -668,7 +678,7 @@ int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
}
}
- b2r2_log_info("%s: tba=%p\tsba=%p\n", __func__,
+ b2r2_log_info(cont->dev, "%s: tba=%p\tsba=%p\n", __func__,
(void *)node->node.GROUP1.B2R2_TBA,
(void *)node->node.GROUP4.B2R2_SBA);
@@ -681,8 +691,8 @@ int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
/**
* b2r2_node_split_unassign_buffers() - releases temporary buffers
*/
-void b2r2_node_split_unassign_buffers(struct b2r2_node_split_job *this,
- struct b2r2_node *first)
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *first)
{
return;
}
@@ -690,7 +700,8 @@ void b2r2_node_split_unassign_buffers(struct b2r2_node_split_job *this,
/**
* b2r2_node_split_cancel() - cancels and releases a job instance
*/
-void b2r2_node_split_cancel(struct b2r2_node_split_job *this)
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
{
memset(this, 0, sizeof(*this));
@@ -701,20 +712,23 @@ void b2r2_node_split_cancel(struct b2r2_node_split_job *this)
* Private functions
*/
-static void recalculate_rects(struct b2r2_blt_req *req)
+static void recalculate_rects(struct b2r2_control *cont,
+ struct b2r2_blt_req *req)
{
struct b2r2_blt_rect new_dst_rect;
struct b2r2_blt_rect new_src_rect;
struct b2r2_blt_rect new_bg_rect;
- b2r2_trim_rects(req, &new_bg_rect, &new_dst_rect, &new_src_rect);
+ b2r2_trim_rects(cont,
+ req, &new_bg_rect, &new_dst_rect, &new_src_rect);
req->dst_rect = new_dst_rect;
req->src_rect = new_src_rect;
req->bg_rect = new_bg_rect;
}
-static int check_rect(const struct b2r2_blt_img *img,
+static int check_rect(struct b2r2_control *cont,
+ const struct b2r2_blt_img *img,
const struct b2r2_blt_rect *rect,
const struct b2r2_blt_rect *clip)
{
@@ -724,9 +738,9 @@ static int check_rect(const struct b2r2_blt_img *img,
/* Check rectangle dimensions*/
if ((rect->width <= 0) || (rect->height <= 0)) {
- b2r2_log_warn("%s: "
- "Illegal rect (%d, %d, %d, %d)\n",
- __func__, rect->x, rect->y, rect->width, rect->height);
+ b2r2_log_warn(cont->dev, "%s: Illegal rect (%d, %d, %d, %d)\n",
+ __func__, rect->x, rect->y, rect->width,
+ rect->height);
ret = -EINVAL;
goto error;
}
@@ -747,23 +761,23 @@ static int check_rect(const struct b2r2_blt_img *img,
/* Check so that the rect isn't outside the buffer */
if ((l < 0) || (t < 0) || (l >= img->width) || (t >= img->height)) {
- b2r2_log_warn("%s: "
- "rect origin outside buffer\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: rect origin outside buffer\n",
+ __func__);
ret = -EINVAL;
goto error;
}
if ((r > img->width) || (b > img->height)) {
- b2r2_log_warn("%s: "
- "rect ends outside buffer\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: rect ends outside buffer\n",
+ __func__);
ret = -EINVAL;
goto error;
}
/* Check so the intersected rectangle isn't empty */
if ((l == r) || (t == b)) {
- b2r2_log_warn("%s: "
- "rect is empty (width or height zero)\n",
+ b2r2_log_warn(cont->dev,
+ "%s: rect is empty (width or height zero)\n",
__func__);
ret = -EINVAL;
goto error;
@@ -771,7 +785,7 @@ static int check_rect(const struct b2r2_blt_img *img,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -860,7 +874,8 @@ static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt,
/**
* analyze_fmt_conv() - analyze the format conversions needed for a job
*/
-static int analyze_fmt_conv(struct b2r2_node_split_buf *src,
+static int analyze_fmt_conv(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst,
const u32 **vmx, u32 *node_count)
{
@@ -966,10 +981,12 @@ static int analyze_color_fill(struct b2r2_node_split_job *this,
const struct b2r2_blt_request *req, u32 *node_count)
{
int ret;
+ struct b2r2_control *cont = req->instance->control;
/* Destination must be raster for raw fill to work */
if (this->dst.type != B2R2_FMT_TYPE_RASTER) {
- b2r2_log_warn("%s: fill requires raster destination\n",
+ b2r2_log_warn(cont->dev,
+ "%s: fill requires raster destination\n",
__func__);
ret = -EINVAL;
goto error;
@@ -1012,7 +1029,7 @@ static int analyze_color_fill(struct b2r2_node_split_job *this,
this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
} else {
/* Wait, what? */
- b2r2_log_warn("%s: "
+ b2r2_log_warn(cont->dev, "%s: "
"Illegal destination format for fill",
__func__);
ret = -EINVAL;
@@ -1037,8 +1054,9 @@ static int analyze_color_fill(struct b2r2_node_split_job *this,
this->src.color);
}
- ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
- node_count);
+ ret = analyze_fmt_conv(
+ cont, &this->src, &this->dst, &this->ivmx,
+ node_count);
if (ret < 0)
goto error;
}
@@ -1046,7 +1064,7 @@ static int analyze_color_fill(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -1059,10 +1077,12 @@ static int analyze_transform(struct b2r2_node_split_job *this,
u32 *buf_count)
{
int ret;
-
bool is_scaling;
+#ifdef CONFIG_B2R2_DEBUG
+ struct b2r2_control *cont = req->instance->control;
+#endif
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/*
* The transform enum is defined so that all rotation transforms are
@@ -1074,9 +1094,10 @@ static int analyze_transform(struct b2r2_node_split_job *this,
if (this->rotation && (this->dst.type != B2R2_FMT_TYPE_RASTER ||
this->dst.fmt == B2R2_BLT_FMT_Y_CB_Y_CR ||
this->dst.fmt == B2R2_BLT_FMT_CB_Y_CR_Y)) {
- b2r2_log_warn("%s: Unsupported operation "
- "(rot && (!dst_raster || dst==422R))",
- __func__);
+ b2r2_log_warn(cont->dev,
+ "%s: Unsupported operation "
+ "(rot && (!dst_raster || dst==422R))",
+ __func__);
ret = -ENOSYS;
goto unsupported;
}
@@ -1105,8 +1126,8 @@ static int analyze_transform(struct b2r2_node_split_job *this,
if (is_scaling && this->rotation && this->blend) {
/* TODO: This is unsupported. Fix it! */
- b2r2_log_info("%s: Unsupported operation (rot+rescale+blend)\n",
- __func__);
+ b2r2_log_info(cont->dev, "%s: Unsupported operation "
+ "(rot+rescale+blend)\n", __func__);
ret = -ENOSYS;
goto unsupported;
}
@@ -1135,7 +1156,7 @@ static int analyze_transform(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_warn("%s: error!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
unsupported:
return ret;
}
@@ -1148,6 +1169,7 @@ static int analyze_copy(struct b2r2_node_split_job *this,
u32 *buf_count)
{
int ret;
+ struct b2r2_control *cont = req->instance->control;
memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win));
@@ -1168,8 +1190,8 @@ static int analyze_copy(struct b2r2_node_split_job *this,
this->type = B2R2_COPY;
- ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
- &copy_count);
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst,
+ &this->ivmx, &copy_count);
if (ret < 0)
goto error;
@@ -1179,7 +1201,7 @@ static int analyze_copy(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -1202,34 +1224,30 @@ static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
u32 *buf_count)
{
int ret;
-
+ struct b2r2_control *cont = req->instance->control;
struct b2r2_node_split_buf *src = &this->src;
struct b2r2_node_split_buf *dst = &this->dst;
struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
u32 num_rows;
u32 num_cols;
-
u32 rot_count;
u32 rescale_count;
-
u32 nodes_per_rot;
u32 nodes_per_rescale;
-
u32 right_width;
u32 bottom_height;
-
const u32 *dummy_vmx;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
/* Calculate the desired tmp buffer size */
- tmp->win.width = rescale(B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ tmp->win.width = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
tmp->win.width >>= 10;
tmp->win.width = min(tmp->win.width, dst->rect.height);
tmp->win.height = dst->rect.width;
- setup_tmp_buf(tmp, this->max_buf_size, dst->fmt, tmp->win.width,
+ setup_tmp_buf(cont, tmp, this->max_buf_size, dst->fmt, tmp->win.width,
tmp->win.height);
tmp->tmp_buf_index = 1;
this->work_bufs[0].size = tmp->pitch * tmp->height;
@@ -1254,12 +1272,12 @@ static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
* Calculate how many nodes are required to copy to and from the tmp
* buffer
*/
- ret = analyze_fmt_conv(src, tmp, &this->ivmx, &nodes_per_rescale);
+ ret = analyze_fmt_conv(cont, src, tmp, &this->ivmx, &nodes_per_rescale);
if (ret < 0)
goto error;
/* We will not do any format conversion in the rotation stage */
- ret = analyze_fmt_conv(tmp, dst, &dummy_vmx, &nodes_per_rot);
+ ret = analyze_fmt_conv(cont, tmp, dst, &dummy_vmx, &nodes_per_rot);
if (ret < 0)
goto error;
@@ -1280,8 +1298,8 @@ static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
rot_count += count * num_rows;
rescale_count += num_rows;
- b2r2_log_info("%s: rightmost: %d nodes\n", __func__,
- count*num_rows);
+ b2r2_log_info(cont->dev, "%s: rightmost: %d nodes\n", __func__,
+ count*num_rows);
}
/* Calculate node count for the bottom tiles */
@@ -1290,8 +1308,8 @@ static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
rot_count += count * num_cols;
rescale_count += num_cols;
- b2r2_log_info("%s: bottom: %d nodes\n", __func__,
- count * num_cols);
+ b2r2_log_info(cont->dev, "%s: bottom: %d nodes\n", __func__,
+ count * num_cols);
}
@@ -1301,7 +1319,8 @@ static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
rot_count += count;
rescale_count++;
- b2r2_log_info("%s: bottom right: %d nodes\n", __func__, count);
+ b2r2_log_info(cont->dev, "%s: bottom right: %d nodes\n",
+ __func__, count);
}
@@ -1312,7 +1331,7 @@ static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_warn("%s: error!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
return ret;
}
@@ -1332,10 +1351,10 @@ static int analyze_rot_scale(struct b2r2_node_split_job *this,
u32 *buf_count)
{
int ret;
-
bool upscale;
+ struct b2r2_control *cont = req->instance->control;
- ret = analyze_scale_factors(this);
+ ret = analyze_scale_factors(cont, this);
if (ret < 0)
goto error;
@@ -1367,20 +1386,19 @@ static int analyze_scaling(struct b2r2_node_split_job *this,
u32 *buf_count)
{
int ret;
-
u32 copy_count;
u32 nbr_cols;
-
s32 dst_w;
+ struct b2r2_control *cont = req->instance->control;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
- ret = analyze_scale_factors(this);
+ ret = analyze_scale_factors(cont, this);
if (ret < 0)
goto error;
/* Find out how many nodes a simple copy would require */
- ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
&copy_count);
if (ret < 0)
goto error;
@@ -1398,18 +1416,19 @@ static int analyze_scaling(struct b2r2_node_split_job *this,
*
* The stripe will touch pixels 127.8 through 255.6, i.e. 129 pixels.
*/
- dst_w = rescale(B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
+ dst_w = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf);
if (dst_w < (1 << 10))
dst_w = 1;
else
dst_w >>= 10;
- b2r2_log_info("%s: dst_w=%d dst.rect.width=%d\n", __func__, dst_w,
- this->dst.rect.width);
+ b2r2_log_info(cont->dev, "%s: dst_w=%d dst.rect.width=%d\n",
+ __func__, dst_w, this->dst.rect.width);
this->dst.win.width = min(dst_w, this->dst.rect.width);
- b2r2_log_info("%s: dst.win.width=%d\n", __func__, this->dst.win.width);
+ b2r2_log_info(cont->dev, "%s: dst.win.width=%d\n",
+ __func__, this->dst.win.width);
nbr_cols = this->dst.rect.width / this->dst.win.width;
if (this->dst.rect.width % this->dst.win.width)
@@ -1419,12 +1438,12 @@ static int analyze_scaling(struct b2r2_node_split_job *this,
this->type = B2R2_SCALE;
- b2r2_log_info("%s exit\n", __func__);
+ b2r2_log_info(cont->dev, "%s exit\n", __func__);
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -1437,11 +1456,11 @@ static int analyze_rotate(struct b2r2_node_split_job *this,
u32 *buf_count)
{
int ret;
-
u32 nodes_per_tile;
+ struct b2r2_control *cont = req->instance->control;
/* Find out how many nodes a simple copy would require */
- ret = analyze_fmt_conv(&this->src, &this->dst, &this->ivmx,
+ ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx,
&nodes_per_tile);
if (ret < 0)
goto error;
@@ -1477,7 +1496,7 @@ static int analyze_rotate(struct b2r2_node_split_job *this,
else
tmp_fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
- setup_tmp_buf(tmp, this->max_buf_size, tmp_fmt,
+ setup_tmp_buf(cont, tmp, this->max_buf_size, tmp_fmt,
this->dst.win.width, this->dst.win.height);
tmp->tmp_buf_index = 1;
@@ -1506,14 +1525,15 @@ static int analyze_rotate(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
/**
* analyze_scale_factors() - determines the scale factors for the op
*/
-static int analyze_scale_factors(struct b2r2_node_split_job *this)
+static int analyze_scale_factors(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this)
{
int ret;
@@ -1521,22 +1541,22 @@ static int analyze_scale_factors(struct b2r2_node_split_job *this)
u16 vsf;
if (this->rotation) {
- ret = calculate_scale_factor(this->src.rect.width,
+ ret = calculate_scale_factor(cont, this->src.rect.width,
this->dst.rect.height, &hsf);
if (ret < 0)
goto error;
- ret = calculate_scale_factor(this->src.rect.height,
+ ret = calculate_scale_factor(cont, this->src.rect.height,
this->dst.rect.width, &vsf);
if (ret < 0)
goto error;
} else {
- ret = calculate_scale_factor(this->src.rect.width,
+ ret = calculate_scale_factor(cont, this->src.rect.width,
this->dst.rect.width, &hsf);
if (ret < 0)
goto error;
- ret = calculate_scale_factor(this->src.rect.height,
+ ret = calculate_scale_factor(cont, this->src.rect.height,
this->dst.rect.height, &vsf);
if (ret < 0)
goto error;
@@ -1548,20 +1568,21 @@ static int analyze_scale_factors(struct b2r2_node_split_job *this)
this->h_rsf = hsf;
this->v_rsf = vsf;
- b2r2_log_info("%s: h_rsf=%.4x\n", __func__, this->h_rsf);
- b2r2_log_info("%s: v_rsf=%.4x\n", __func__, this->v_rsf);
+ b2r2_log_info(cont->dev, "%s: h_rsf=%.4x\n", __func__, this->h_rsf);
+ b2r2_log_info(cont->dev, "%s: v_rsf=%.4x\n", __func__, this->v_rsf);
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
/**
* configure_tile() - configures one tile of a blit operation
*/
-static int configure_tile(struct b2r2_node_split_job *this,
- struct b2r2_node *node, struct b2r2_node **next)
+static int configure_tile(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
{
int ret = 0;
@@ -1625,21 +1646,22 @@ static int configure_tile(struct b2r2_node_split_job *this,
/* Do the configuration depending on operation type */
switch (this->type) {
case B2R2_DIRECT_FILL:
- configure_direct_fill(node, this->src.color, dst, &last);
+ configure_direct_fill(cont, node, this->src.color, dst, &last);
break;
case B2R2_DIRECT_COPY:
- configure_direct_copy(node, src, dst, &last);
+ configure_direct_copy(cont, node, src, dst, &last);
break;
case B2R2_FILL:
- ret = configure_fill(node, src->color, src->fmt,
+ ret = configure_fill(cont, node, src->color, src->fmt,
dst, this->ivmx, &last);
break;
case B2R2_FLIP: /* FLIP is just a copy with different VSO/HSO */
case B2R2_COPY:
- ret = configure_copy(node, src, dst, this->ivmx, &last, this);
+ ret = configure_copy(
+ cont, node, src, dst, this->ivmx, &last, this);
break;
case B2R2_ROTATE:
@@ -1647,8 +1669,8 @@ static int configure_tile(struct b2r2_node_split_job *this,
struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0];
if (this->blend) {
- b2r2_log_info("%s: rotation + blend\n",
- __func__);
+ b2r2_log_info(cont->dev, "%s: rotation + "
+ "blend\n", __func__);
tmp->win.x = 0;
tmp->win.y = tmp->win.height - 1;
@@ -1656,33 +1678,33 @@ static int configure_tile(struct b2r2_node_split_job *this,
tmp->win.height = dst->win.height;
/* Rotate to the temp buf */
- ret = configure_rotate(node, src, tmp,
+ ret = configure_rotate(cont, node, src, tmp,
this->ivmx, &node, NULL);
if (ret < 0)
goto error;
/* Then do a copy to the destination */
- ret = configure_copy(node, tmp, dst, NULL,
+ ret = configure_copy(cont, node, tmp, dst, NULL,
&last, this);
} else {
/* Just do a rotation */
- ret = configure_rotate(node, src, dst,
+ ret = configure_rotate(cont, node, src, dst,
this->ivmx, &last, this);
}
}
break;
case B2R2_SCALE:
- ret = configure_scale(node, src, dst, this->h_rsf, this->v_rsf,
- this->ivmx, &last, this);
+ ret = configure_scale(cont, node, src, dst, this->h_rsf,
+ this->v_rsf, this->ivmx, &last, this);
break;
case B2R2_SCALE_AND_ROTATE:
- ret = configure_rot_scale(this, node, &last);
+ ret = configure_rot_scale(cont, this, node, &last);
break;
default:
- b2r2_log_warn("%s: Unsupported request\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Unsupported request\n", __func__);
ret = -ENOSYS;
goto error;
break;
@@ -1698,7 +1720,7 @@ static int configure_tile(struct b2r2_node_split_job *this,
/* Configure blending and clipping */
do {
if (node == NULL) {
- b2r2_log_warn("%s: "
+ b2r2_log_warn(cont->dev, "%s: "
"Internal error! Out of nodes!\n",
__func__);
ret = -ENOMEM;
@@ -1706,18 +1728,17 @@ static int configure_tile(struct b2r2_node_split_job *this,
}
if (this->blend) {
- if (this->flags & B2R2_BLT_FLAG_BG_BLEND) {
- configure_bg(node, bg,
+ if (this->flags & B2R2_BLT_FLAG_BG_BLEND)
+ configure_bg(cont, node, bg,
this->swap_fg_bg);
- } else {
- configure_bg(node, dst,
+ else
+ configure_bg(cont, node, dst,
this->swap_fg_bg);
- }
- configure_blend(node, this->flags,
+ configure_blend(cont, node, this->flags,
this->global_alpha);
}
if (this->clip)
- configure_clip(node, &this->clip_rect);
+ configure_clip(cont, node, &this->clip_rect);
node = node->next;
@@ -1730,7 +1751,7 @@ static int configure_tile(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_warn("%s: Error!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Error!\n", __func__);
return ret;
}
@@ -1740,7 +1761,8 @@ error:
* This functions configures a set of nodes for rotation using the destination
* window instead of the rectangle for calculating tiles.
*/
-static int configure_sub_rot(struct b2r2_node *node,
+static int configure_sub_rot(struct b2r2_control *cont,
+ struct b2r2_node *node,
struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst,
const u32 *ivmx, struct b2r2_node **next,
@@ -1757,7 +1779,7 @@ static int configure_sub_rot(struct b2r2_node *node,
memcpy(&src_win, &src->win, sizeof(src_win));
memcpy(&dst_win, &dst->win, sizeof(dst_win));
- b2r2_log_info("%s: src_win=(%d, %d, %d, %d) "
+ b2r2_log_info(cont->dev, "%s: src_win=(%d, %d, %d, %d) "
"dst_win=(%d, %d, %d, %d)\n", __func__,
src_win.x, src_win.y, src_win.width, src_win.height,
dst_win.x, dst_win.y, dst_win.width, dst_win.height);
@@ -1772,21 +1794,23 @@ static int configure_sub_rot(struct b2r2_node *node,
u32 dst_y = dst->win.y;
u32 dst_h = dst->win.height;
- dst->win.width = min(dst->win.width,
- dst_win.width - (int)x_pixels);
+ dst->win.width = min(dst->win.width, dst_win.width -
+ (int)x_pixels);
src->win.height = dst->win.width;
- b2r2_log_info("%s: x_pixels=%d\n", __func__, x_pixels);
+ b2r2_log_info(cont->dev, "%s: x_pixels=%d\n",
+ __func__, x_pixels);
while (y_pixels < dst_win.height) {
dst->win.height = min(dst->win.height,
dst_win.height - (int)y_pixels);
src->win.width = dst->win.height;
- b2r2_log_info("%s: y_pixels=%d\n", __func__, y_pixels);
+ b2r2_log_info(cont->dev, "%s: y_pixels=%d\n",
+ __func__, y_pixels);
- ret = configure_rotate(node, src, dst, ivmx, &node,
- job);
+ ret = configure_rotate(cont, node, src, dst,
+ ivmx, &node, job);
if (ret < 0)
goto error;
@@ -1821,7 +1845,7 @@ static int configure_sub_rot(struct b2r2_node *node,
return 0;
error:
- b2r2_log_warn("%s: error!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
return ret;
}
@@ -1830,8 +1854,9 @@ error:
*
* When doing a downscale it is better to do the rotation last.
*/
-static int configure_rot_downscale(struct b2r2_node_split_job *this,
- struct b2r2_node *node, struct b2r2_node **next)
+static int configure_rot_downscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this,
+ struct b2r2_node *node, struct b2r2_node **next)
{
int ret;
@@ -1844,12 +1869,12 @@ static int configure_rot_downscale(struct b2r2_node_split_job *this,
tmp->win.width = dst->win.height;
tmp->win.height = dst->win.width;
- ret = configure_scale(node, src, tmp, this->h_rsf, this->v_rsf,
+ ret = configure_scale(cont, node, src, tmp, this->h_rsf, this->v_rsf,
this->ivmx, &node, this);
if (ret < 0)
goto error;
- ret = configure_sub_rot(node, tmp, dst, NULL, &node, this);
+ ret = configure_sub_rot(cont, node, tmp, dst, NULL, &node, this);
if (ret < 0)
goto error;
@@ -1858,7 +1883,7 @@ static int configure_rot_downscale(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_info("%s: error!\n", __func__);
+ b2r2_log_info(cont->dev, "%s: error!\n", __func__);
return ret;
}
@@ -1867,27 +1892,29 @@ error:
*
* When doing an upscale it is better to do the rotation first.
*/
-static int configure_rot_upscale(struct b2r2_node_split_job *this,
- struct b2r2_node *node, struct b2r2_node **next)
+static int configure_rot_upscale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
{
/* TODO: Implement a optimal upscale (rotation first) */
- return configure_rot_downscale(this, node, next);
+ return configure_rot_downscale(cont, this, node, next);
}
/**
* configure_rot_scale() - configures a combined rotation and scaling op
*/
-static int configure_rot_scale(struct b2r2_node_split_job *this,
- struct b2r2_node *node, struct b2r2_node **next)
+static int configure_rot_scale(struct b2r2_control *cont,
+ struct b2r2_node_split_job *this, struct b2r2_node *node,
+ struct b2r2_node **next)
{
int ret;
bool upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 10);
if (upscale)
- ret = configure_rot_upscale(this, node, next);
+ ret = configure_rot_upscale(cont, this, node, next);
else
- ret = configure_rot_downscale(this, node, next);
+ ret = configure_rot_downscale(cont, this, node, next);
if (ret < 0)
goto error;
@@ -1895,7 +1922,7 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
return 0;
error:
- b2r2_log_warn("%s: error!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
return ret;
}
@@ -1909,8 +1936,12 @@ error:
*
* This operation will always consume one node only.
*/
-static void configure_direct_fill(struct b2r2_node *node, u32 color,
- struct b2r2_node_split_buf *dst, struct b2r2_node **next)
+static void configure_direct_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
{
node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL | B2R2_CIC_SOURCE_1;
node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_FILL;
@@ -1939,9 +1970,12 @@ static void configure_direct_fill(struct b2r2_node *node, u32 color,
*
* This operation will always consume one node only.
*/
-static void configure_direct_copy(struct b2r2_node *node,
+static void configure_direct_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
struct b2r2_node_split_buf *src,
- struct b2r2_node_split_buf *dst, struct b2r2_node **next)
+ struct b2r2_node_split_buf *dst,
+ struct b2r2_node **next)
{
node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1;
node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_COPY;
@@ -1971,21 +2005,26 @@ static void configure_direct_copy(struct b2r2_node *node,
* This operation will consume as many nodes as are required to write to the
* destination format.
*/
-static int configure_fill(struct b2r2_node *node, u32 color,
- enum b2r2_blt_fmt fmt, struct b2r2_node_split_buf *dst,
- const u32 *ivmx, struct b2r2_node **next)
+static int configure_fill(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
+ u32 color,
+ enum b2r2_blt_fmt fmt,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
+ struct b2r2_node **next)
{
int ret;
struct b2r2_node *last;
/* Configure the destination */
- ret = configure_dst(node, dst, ivmx, &last);
+ ret = configure_dst(cont, node, dst, ivmx, &last);
if (ret < 0)
goto error;
do {
if (node == NULL) {
- b2r2_log_warn("%s: "
+ b2r2_log_warn(cont->dev, "%s: "
"Internal error! Out of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
@@ -2044,7 +2083,7 @@ static int configure_fill(struct b2r2_node *node, u32 color,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -2060,9 +2099,12 @@ error:
* This operation will consume as many nodes as are required to write to the
* destination format.
*/
-static int configure_copy(struct b2r2_node *node,
+static int configure_copy(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
struct b2r2_node_split_buf *src,
- struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
struct b2r2_node **next,
struct b2r2_node_split_job *this)
{
@@ -2070,15 +2112,16 @@ static int configure_copy(struct b2r2_node *node,
struct b2r2_node *last;
- ret = configure_dst(node, dst, ivmx, &last);
+ ret = configure_dst(cont, node, dst, ivmx, &last);
if (ret < 0)
goto error;
/* Configure the source for each node */
do {
if (node == NULL) {
- b2r2_log_warn("%s: "
- " Internal error! Out of nodes!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: "
+ " Internal error! Out of nodes!\n",
+ __func__);
ret = -ENOMEM;
goto error;
}
@@ -2116,7 +2159,7 @@ static int configure_copy(struct b2r2_node *node,
node->node.GROUP7.B2R2_CML = request->clut_phys_addr;
}
/* Configure the source(s) */
- configure_src(node, src, ivmx);
+ configure_src(cont, node, src, ivmx);
node = node->next;
} while (node != last);
@@ -2126,7 +2169,7 @@ static int configure_copy(struct b2r2_node *node,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -2142,9 +2185,12 @@ error:
* This operation will consume as many nodes are are required by the combination
* of rotating and writing the destination format.
*/
-static int configure_rotate(struct b2r2_node *node,
+static int configure_rotate(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
struct b2r2_node_split_buf *src,
- struct b2r2_node_split_buf *dst, const u32 *ivmx,
+ struct b2r2_node_split_buf *dst,
+ const u32 *ivmx,
struct b2r2_node **next,
struct b2r2_node_split_job *this)
{
@@ -2152,21 +2198,22 @@ static int configure_rotate(struct b2r2_node *node,
struct b2r2_node *last;
- ret = configure_copy(node, src, dst, ivmx, &last, this);
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
if (ret < 0)
goto error;
do {
if (node == NULL) {
- b2r2_log_warn("%s: "
- "Internal error! Out of nodes!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: "
+ "Internal error! Out of nodes!\n",
+ __func__);
ret = -ENOMEM;
goto error;
}
node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
- b2r2_log_debug("%s:\n"
+ b2r2_log_debug(cont->dev, "%s:\n"
"\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
"\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
"\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
@@ -2190,7 +2237,7 @@ static int configure_rotate(struct b2r2_node *node,
return 0;
error:
- b2r2_log_warn("%s: error!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: error!\n", __func__);
return ret;
}
@@ -2205,7 +2252,9 @@ error:
* @ivmx - the iVMX to use for color conversion
* @next - the next empty node in the node list
*/
-static int configure_scale(struct b2r2_node *node,
+static int configure_scale(
+ struct b2r2_control *cont,
+ struct b2r2_node *node,
struct b2r2_node_split_buf *src,
struct b2r2_node_split_buf *dst,
u16 h_rsf, u16 v_rsf,
@@ -2250,6 +2299,8 @@ static int configure_scale(struct b2r2_node *node,
bool downsample;
struct b2r2_blt_rect tmp_win = src->win;
+ bool src_raster = src->type == B2R2_FMT_TYPE_RASTER;
+ bool dst_raster = dst->type == B2R2_FMT_TYPE_RASTER;
/* Rescale the normalized source window */
src_x = inv_rescale(src->win.x - src->rect.x, luma_h_rsf);
@@ -2277,8 +2328,7 @@ static int configure_scale(struct b2r2_node *node,
luma_vsrc_init = src_y & 0x3ff;
/* Check for upsampling of chroma */
- upsample = src->type != B2R2_FMT_TYPE_RASTER &&
- !is_yuv444_fmt(src->fmt);
+ upsample = !src_raster && !is_yuv444_fmt(src->fmt);
if (upsample) {
h_rsf /= 2;
@@ -2287,8 +2337,7 @@ static int configure_scale(struct b2r2_node *node,
}
/* Check for downsampling of chroma */
- downsample = dst->type != B2R2_FMT_TYPE_RASTER &&
- !is_yuv444_fmt(dst->fmt);
+ downsample = !dst_raster && !is_yuv444_fmt(dst->fmt);
if (downsample) {
h_rsf *= 2;
@@ -2303,9 +2352,9 @@ static int configure_scale(struct b2r2_node *node,
/* Configure resize and filters */
fctl = B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
- B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
+ B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
luma_fctl = B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
- B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
+ B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;
rsf = (h_rsf << B2R2_RSF_HSRC_INC_SHIFT) |
(v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
@@ -2313,13 +2362,13 @@ static int configure_scale(struct b2r2_node *node,
(luma_v_rsf << B2R2_RSF_VSRC_INC_SHIFT);
rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
- (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
- (hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
- (vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
luma_rzi = B2R2_RZI_DEFAULT_HNB_REPEAT |
- (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
- (luma_hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
- (luma_vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
+ (2 << B2R2_RZI_VNB_REPEAT_SHIFT) |
+ (luma_hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) |
+ (luma_vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT);
/*
* We should only filter if there is an actual rescale (i.e. not when
@@ -2354,28 +2403,27 @@ static int configure_scale(struct b2r2_node *node,
luma_vfp = luma_vf->v_coeffs_phys_addr;
}
- ret = configure_copy(node, src, dst, ivmx, &last, this);
+ ret = configure_copy(cont, node, src, dst, ivmx, &last, this);
if (ret < 0)
goto error;
do {
bool chroma_rescale =
- (h_rsf != (1 << 10)) || (v_rsf != (1 << 10));
+ (h_rsf != (1 << 10)) || (v_rsf != (1 << 10));
bool luma_rescale =
- (luma_h_rsf != (1 << 10)) || (luma_v_rsf != (1 << 10));
- bool src_raster = src->type == B2R2_FMT_TYPE_RASTER;
- bool dst_raster = dst->type == B2R2_FMT_TYPE_RASTER;
- bool dst_chroma =
- node->node.GROUP1.B2R2_TTY & B2R2_TTY_CHROMA_NOT_LUMA;
+ (luma_h_rsf != (1 << 10)) ||
+ (luma_v_rsf != (1 << 10));
+ bool dst_chroma = node->node.GROUP1.B2R2_TTY &
+ B2R2_TTY_CHROMA_NOT_LUMA;
+ bool dst_luma = !dst_chroma;
if (node == NULL) {
- b2r2_log_warn("%s: "
- "Internal error! Out of nodes!\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Internal error! Out "
+ "of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
}
-
node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;
/*
@@ -2391,10 +2439,10 @@ static int configure_scale(struct b2r2_node *node,
*/
if (!src_raster || (chroma_rescale &&
- (dst_raster || dst_chroma))) {
+ (dst_raster || dst_chroma))) {
/* Enable chroma resize */
node->node.GROUP0.B2R2_INS |=
- B2R2_INS_RESCALE2D_ENABLED;
+ B2R2_INS_RESCALE2D_ENABLED;
node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;
node->node.GROUP8.B2R2_FCTL |= fctl;
@@ -2405,10 +2453,10 @@ static int configure_scale(struct b2r2_node *node,
}
if (!src_raster || (luma_rescale &&
- (dst_raster || !dst_chroma))) {
+ (dst_raster || dst_luma))) {
/* Enable luma resize */
node->node.GROUP0.B2R2_INS |=
- B2R2_INS_RESCALE2D_ENABLED;
+ B2R2_INS_RESCALE2D_ENABLED;
node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA;
node->node.GROUP8.B2R2_FCTL |= luma_fctl;
@@ -2442,7 +2490,7 @@ static int configure_scale(struct b2r2_node *node,
}
}
- b2r2_log_info("%s:\n"
+ b2r2_log_info(cont->dev, "%s:\n"
"\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n"
"\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n"
"\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n"
@@ -2468,7 +2516,7 @@ static int configure_scale(struct b2r2_node *node,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -2481,12 +2529,14 @@ error:
*
* This operation will not consume any nodes
*/
-static void configure_src(struct b2r2_node *node,
+static void configure_src(struct b2r2_control *cont,
+ struct b2r2_node *node,
struct b2r2_node_split_buf *src, const u32 *ivmx)
{
struct b2r2_node_split_buf tmp_buf;
- b2r2_log_info("%s: src.win=(%d, %d, %d, %d)\n", __func__,
+ b2r2_log_info(cont->dev,
+ "%s: src.win=(%d, %d, %d, %d)\n", __func__,
src->win.x, src->win.y, src->win.width,
src->win.height);
@@ -2508,7 +2558,7 @@ static void configure_src(struct b2r2_node *node,
if (is_yuv420_fmt(src->fmt)) {
tmp_buf.win.height =
- (tmp_buf.win.height + 1) / 2;
+ (tmp_buf.win.height + 1) / 2;
tmp_buf.win.y >>= 1;
}
}
@@ -2536,7 +2586,7 @@ static void configure_src(struct b2r2_node *node,
*/
if (is_yuv420_fmt(src->fmt)) {
tmp_buf.win.height =
- (tmp_buf.win.height + 1) / 2;
+ (tmp_buf.win.height + 1) / 2;
tmp_buf.win.y >>= 1;
}
}
@@ -2572,10 +2622,12 @@ static void configure_src(struct b2r2_node *node,
* WARNING: Take care when using this with semi-planar or planar sources since
* either S1 or S2 will be overwritten!
*/
-static void configure_bg(struct b2r2_node *node,
+static void configure_bg(struct b2r2_control *cont,
+ struct b2r2_node *node,
struct b2r2_node_split_buf *bg, bool swap_fg_bg)
{
- b2r2_log_info("%s: bg.win=(%d, %d, %d, %d)\n", __func__,
+ b2r2_log_info(cont->dev,
+ "%s: bg.win=(%d, %d, %d, %d)\n", __func__,
bg->win.x, bg->win.y, bg->win.width,
bg->win.height);
@@ -2614,7 +2666,7 @@ static void configure_bg(struct b2r2_node *node,
* This operation will consume as many nodes as are required to write the
* destination format.
*/
-static int configure_dst(struct b2r2_node *node,
+static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node,
struct b2r2_node_split_buf *dst, const u32 *ivmx,
struct b2r2_node **next)
{
@@ -2624,9 +2676,10 @@ static int configure_dst(struct b2r2_node *node,
struct b2r2_node_split_buf dst_planes[3];
- b2r2_log_info("%s: dst.win=(%d, %d, %d, %d)\n", __func__,
- dst->win.x, dst->win.y, dst->win.width,
- dst->win.height);
+ b2r2_log_info(cont->dev,
+ "%s: dst.win=(%d, %d, %d, %d)\n", __func__,
+ dst->win.x, dst->win.y, dst->win.width,
+ dst->win.height);
memcpy(&dst_planes[0], dst, sizeof(dst_planes[0]));
@@ -2691,7 +2744,7 @@ static int configure_dst(struct b2r2_node *node,
for (i = 0; i < nbr_planes; i++) {
if (node == NULL) {
- b2r2_log_warn("%s: "
+ b2r2_log_warn(cont->dev, "%s: "
"Internal error! Out of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
@@ -2729,7 +2782,6 @@ static int configure_dst(struct b2r2_node *node,
}
}
-
set_target(node, dst_planes[i].addr, &dst_planes[i]);
node = node->next;
@@ -2740,7 +2792,7 @@ static int configure_dst(struct b2r2_node *node,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -2760,7 +2812,8 @@ error:
* WARNING: Take care when using this with semi-planar or planar sources since
* either S1 or S2 will be overwritten!
*/
-static void configure_blend(struct b2r2_node *node, u32 flags, u32 global_alpha)
+static void configure_blend(struct b2r2_control *cont,
+ struct b2r2_node *node, u32 flags, u32 global_alpha)
{
node->node.GROUP0.B2R2_ACK &= ~(B2R2_ACK_MODE_BYPASS_S2_S3);
@@ -2792,7 +2845,7 @@ static void configure_blend(struct b2r2_node *node, u32 flags, u32 global_alpha)
*
* This operation does not consume any nodes.
*/
-static void configure_clip(struct b2r2_node *node,
+static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node,
struct b2r2_blt_rect *clip_rect)
{
s32 l = clip_rect->x;
@@ -2821,9 +2874,13 @@ static void configure_clip(struct b2r2_node *node,
* @color_fill - determines whether the buffer should be used for color fill
* @color - the color to use in case of color fill
*/
-static void set_buf(struct b2r2_node_split_buf *buf, u32 addr,
+static void set_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *buf,
+ u32 addr,
const struct b2r2_blt_img *img,
- const struct b2r2_blt_rect *rect, bool color_fill, u32 color)
+ const struct b2r2_blt_rect *rect,
+ bool color_fill,
+ u32 color)
{
memset(buf, 0, sizeof(*buf));
@@ -2895,8 +2952,12 @@ static void set_buf(struct b2r2_node_split_buf *buf, u32 addr,
/**
* setup_tmp_buf() - configure a temporary buffer
*/
-static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
- enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height)
+static int setup_tmp_buf(struct b2r2_control *cont,
+ struct b2r2_node_split_buf *tmp,
+ u32 max_size,
+ enum b2r2_blt_fmt pref_fmt,
+ u32 pref_width,
+ u32 pref_height)
{
int ret;
@@ -2918,9 +2979,9 @@ static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
} else {
/* Wait, what? */
- b2r2_log_warn("%s: "
- "Cannot create tmp buf from this fmt (%d)\n", __func__,
- pref_fmt);
+ b2r2_log_warn(cont->dev, "%s: "
+ "Cannot create tmp buf from this fmt (%d)\n",
+ __func__, pref_fmt);
ret = -EINVAL;
goto error;
}
@@ -2941,7 +3002,7 @@ static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
/* We should at least have enough room for one scanline */
if (height == 0) {
- b2r2_log_warn("%s: Not enough tmp mem!\n",
+ b2r2_log_warn(cont->dev, "%s: Not enough tmp mem!\n",
__func__);
ret = -ENOMEM;
goto error;
@@ -2961,7 +3022,7 @@ static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -3449,12 +3510,12 @@ static bool is_transform(const struct b2r2_blt_request *req)
*
* Returns the rescaled dimension in 22.10 fixed point format.
*/
-static s32 rescale(s32 dim, u16 sf)
+static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf)
{
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
if (sf == 0) {
- b2r2_log_err("%s: Scale factor is 0!\n", __func__);
+ b2r2_log_err(cont->dev, "%s: Scale factor is 0!\n", __func__);
BUG_ON(1);
}
@@ -3662,14 +3723,12 @@ static void reset_nodes(struct b2r2_node *node)
}
}
-int b2r2_node_split_init(void)
+int b2r2_node_split_init(struct b2r2_control *cont)
{
- b2r2_filters_init();
-
return 0;
}
-void b2r2_node_split_exit(void)
+void b2r2_node_split_exit(struct b2r2_control *cont)
{
- b2r2_filters_exit();
+
}
diff --git a/drivers/video/b2r2/b2r2_node_split.h b/drivers/video/b2r2/b2r2_node_split.h
index 5bceac28488..a577241c31b 100644
--- a/drivers/video/b2r2/b2r2_node_split.h
+++ b/drivers/video/b2r2/b2r2_node_split.h
@@ -54,8 +54,8 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req, u32 max_buf_size
* Returns:
* A negative value if an error occurred, 0 otherwise.
*/
-int b2r2_node_split_configure(struct b2r2_node_split_job *job,
- struct b2r2_node *first);
+int b2r2_node_split_configure(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job, struct b2r2_node *first);
/**
* b2r2_node_split_assign_buffers() - Assignes physical addresses
@@ -74,7 +74,8 @@ int b2r2_node_split_configure(struct b2r2_node_split_job *job,
* Returns:
* A negative value if an error occurred, 0 otherwise.
*/
-int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *job,
+int b2r2_node_split_assign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
struct b2r2_node *first, struct b2r2_work_buf *bufs,
u32 buf_count);
@@ -89,7 +90,8 @@ int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *job,
* This makes it possible to reuse the node list with new buffers by calling
* b2r2_node_split_assign_buffers again. Useful for caching node lists.
*/
-void b2r2_node_split_unassign_buffers(struct b2r2_node_split_job *job,
+void b2r2_node_split_unassign_buffers(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job,
struct b2r2_node *first);
/**
@@ -102,20 +104,21 @@ void b2r2_node_split_unassign_buffers(struct b2r2_node_split_job *job,
* This should always be called once b2r2_node_split_analyze has been called
* in order to release any resources allocated while analyzing.
*/
-void b2r2_node_split_cancel(struct b2r2_node_split_job *job);
+void b2r2_node_split_cancel(struct b2r2_control *cont,
+ struct b2r2_node_split_job *job);
/**
* b2r2_node_split_init() - Initializes the node split module
*
* Initializes the node split module and creates debugfs files.
*/
-int b2r2_node_split_init(void);
+int b2r2_node_split_init(struct b2r2_control *cont);
/**
* b2r2_node_split_exit() - Deinitializes the node split module
*
* Releases all resources for the node split module.
*/
-void b2r2_node_split_exit(void);
+void b2r2_node_split_exit(struct b2r2_control *cont);
#endif
diff --git a/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
index 2e7c5ca5a7a..e038941b4e8 100644
--- a/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
+++ b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c
@@ -32,10 +32,10 @@ module_param(print_blts_on, bool, S_IRUGO | S_IWUSR);
static int use_mpix_per_second_in_print_blts = 1;
module_param(use_mpix_per_second_in_print_blts, bool, S_IRUGO | S_IWUSR);
-static int min_avg_max_mpix_per_second_on = 1;
-module_param(min_avg_max_mpix_per_second_on, bool, S_IRUGO | S_IWUSR);
+static int profiler_stats_on = 1;
+module_param(profiler_stats_on, bool, S_IRUGO | S_IWUSR);
-static const unsigned int min_avg_max_mpix_per_second_num_blts_used = 400;
+static const unsigned int profiler_stats_blts_used = 400;
static struct {
unsigned long sampling_start_time_jiffies;
@@ -51,22 +51,27 @@ static struct {
s32 accumulated_num_usecs;
u32 num_blts_done;
-} min_avg_max_mpix_per_second_state;
+} profiler_stats;
static s32 nsec_2_usec(const s32 nsec);
static int is_scale_blt(const struct b2r2_blt_req * const request);
-static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info);
-static void print_blt(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request);
static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs);
-static void print_min_avg_max_mpix_per_second_state(void);
-static void reset_min_avg_max_mpix_per_second_state(void);
-static void do_min_avg_max_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static void print_profiler_stats(void);
+static void reset_profiler_stats(void);
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
-static void blt_done(const struct b2r2_blt_req * const blt, const s32 request_id, const struct b2r2_blt_profiling_info * const blt_profiling_info);
+static void blt_done(const struct b2r2_blt_req * const blt,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info);
static struct b2r2_profiler this = {
@@ -83,23 +88,30 @@ static s32 nsec_2_usec(const s32 nsec)
static int is_scale_blt(const struct b2r2_blt_req * const request)
{
if ((request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90 &&
- (request->src_rect.width != request->dst_rect.height ||
- request->src_rect.height != request->dst_rect.width)) ||
+ (request->src_rect.width !=
+ request->dst_rect.height ||
+ request->src_rect.height !=
+ request->dst_rect.width)) ||
(!(request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) &&
- (request->src_rect.width != request->dst_rect.width ||
- request->src_rect.height != request->dst_rect.height)))
+ (request->src_rect.width !=
+ request->dst_rect.width ||
+ request->src_rect.height !=
+ request->dst_rect.height)))
return 1;
else
return 0;
}
-static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
{
return get_mpix_per_second(get_num_pixels_in_blt(request),
- nsec_2_usec(blt_profiling_info->nsec_active_in_cpu + blt_profiling_info->nsec_active_in_b2r2));
+ nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2));
}
-static void print_blt(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+static void print_blt(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
{
char tmp_str[128];
sprintf(tmp_str, "SF: %#10x, DF: %#10x, F: %#10x, T: %#3x, S: %1i, P: %7i",
@@ -110,13 +122,11 @@ static void print_blt(const struct b2r2_blt_req * const request, const struct b2
is_scale_blt(request),
get_num_pixels_in_blt(request));
if (use_mpix_per_second_in_print_blts)
- printk(KERN_ALERT "%s, MPix/s: %3i\n",
- tmp_str,
+ printk(KERN_ALERT "%s, MPix/s: %3i\n", tmp_str,
get_blt_mpix_per_second(request, blt_profiling_info));
else
printk(KERN_ALERT "%s, CPU: %10i, B2R2: %10i, Tot: %10i ns\n",
- tmp_str,
- blt_profiling_info->nsec_active_in_cpu,
+ tmp_str, blt_profiling_info->nsec_active_in_cpu,
blt_profiling_info->nsec_active_in_b2r2,
blt_profiling_info->total_time_nsec);
}
@@ -135,8 +145,10 @@ static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request)
static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs)
{
- s32 num_pixels_scale_factor = num_pixels != 0 ? S32_MAX / num_pixels : S32_MAX;
- s32 num_usecs_scale_factor = num_usecs != 0 ? S32_MAX / num_usecs : S32_MAX;
+ s32 num_pixels_scale_factor = num_pixels != 0 ?
+ S32_MAX / num_pixels : S32_MAX;
+ s32 num_usecs_scale_factor = num_usecs != 0 ?
+ S32_MAX / num_usecs : S32_MAX;
s32 scale_factor = min(num_pixels_scale_factor, num_usecs_scale_factor);
s32 num_pixels_scaled = num_pixels * scale_factor;
@@ -148,82 +160,82 @@ static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs)
return (num_pixels_scaled / 1000000) / (num_usecs_scaled / 1000000);
}
-static void print_min_avg_max_mpix_per_second_state(void)
+static void print_profiler_stats(void)
{
printk(KERN_ALERT "Min: %3i, Avg: %3i, Max: %3i MPix/s\n",
- min_avg_max_mpix_per_second_state.min_mpix_per_second,
- get_mpix_per_second(min_avg_max_mpix_per_second_state.accumulated_num_pixels,
- min_avg_max_mpix_per_second_state.accumulated_num_usecs),
- min_avg_max_mpix_per_second_state.max_mpix_per_second);
+ profiler_stats.min_mpix_per_second,
+ get_mpix_per_second(
+ profiler_stats.accumulated_num_pixels,
+ profiler_stats.accumulated_num_usecs),
+ profiler_stats.max_mpix_per_second);
printk(KERN_ALERT "Min blit:\n");
- print_blt(&min_avg_max_mpix_per_second_state.min_blt_request,
- &min_avg_max_mpix_per_second_state.min_blt_profiling_info);
+ print_blt(&profiler_stats.min_blt_request,
+ &profiler_stats.min_blt_profiling_info);
printk(KERN_ALERT "Max blit:\n");
- print_blt(&min_avg_max_mpix_per_second_state.max_blt_request,
- &min_avg_max_mpix_per_second_state.max_blt_profiling_info);
+ print_blt(&profiler_stats.max_blt_request,
+ &profiler_stats.max_blt_profiling_info);
}
-static void reset_min_avg_max_mpix_per_second_state(void)
+static void reset_profiler_stats(void)
{
- min_avg_max_mpix_per_second_state.sampling_start_time_jiffies =
- jiffies;
- min_avg_max_mpix_per_second_state.min_mpix_per_second = S32_MAX;
- min_avg_max_mpix_per_second_state.max_mpix_per_second = 0;
- min_avg_max_mpix_per_second_state.accumulated_num_pixels = 0;
- min_avg_max_mpix_per_second_state.accumulated_num_usecs = 0;
- min_avg_max_mpix_per_second_state.num_blts_done = 0;
+ profiler_stats.sampling_start_time_jiffies = jiffies;
+ profiler_stats.min_mpix_per_second = S32_MAX;
+ profiler_stats.max_mpix_per_second = 0;
+ profiler_stats.accumulated_num_pixels = 0;
+ profiler_stats.accumulated_num_usecs = 0;
+ profiler_stats.num_blts_done = 0;
}
-static void do_min_avg_max_mpix_per_second(const struct b2r2_blt_req * const request, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+static void do_profiler_stats(const struct b2r2_blt_req * const request,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
{
s32 num_pixels_in_blt;
s32 num_usec_blt_took;
s32 blt_mpix_per_second;
- if (time_before(jiffies, min_avg_max_mpix_per_second_state.sampling_start_time_jiffies))
+ if (time_before(jiffies, profiler_stats.sampling_start_time_jiffies))
return;
num_pixels_in_blt = get_num_pixels_in_blt(request);
- num_usec_blt_took = nsec_2_usec(blt_profiling_info->nsec_active_in_cpu + blt_profiling_info->nsec_active_in_b2r2);
+ num_usec_blt_took = nsec_2_usec(blt_profiling_info->nsec_active_in_cpu +
+ blt_profiling_info->nsec_active_in_b2r2);
blt_mpix_per_second = get_mpix_per_second(num_pixels_in_blt,
num_usec_blt_took);
- if (blt_mpix_per_second <= min_avg_max_mpix_per_second_state.min_mpix_per_second) {
- min_avg_max_mpix_per_second_state.min_mpix_per_second =
- blt_mpix_per_second;
- memcpy(&min_avg_max_mpix_per_second_state.min_blt_request,
+ if (blt_mpix_per_second <=
+ profiler_stats.min_mpix_per_second) {
+ profiler_stats.min_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.min_blt_request,
request, sizeof(struct b2r2_blt_req));
- memcpy(&min_avg_max_mpix_per_second_state.min_blt_profiling_info,
- blt_profiling_info, sizeof(struct b2r2_blt_profiling_info));
+ memcpy(&profiler_stats.min_blt_profiling_info,
+ blt_profiling_info,
+ sizeof(struct b2r2_blt_profiling_info));
}
- if (blt_mpix_per_second >= min_avg_max_mpix_per_second_state.max_mpix_per_second) {
- min_avg_max_mpix_per_second_state.max_mpix_per_second =
- blt_mpix_per_second;
- memcpy(&min_avg_max_mpix_per_second_state.max_blt_request,
- request, sizeof(struct b2r2_blt_req));
- memcpy(&min_avg_max_mpix_per_second_state.max_blt_profiling_info,
+ if (blt_mpix_per_second >= profiler_stats.max_mpix_per_second) {
+ profiler_stats.max_mpix_per_second = blt_mpix_per_second;
+ memcpy(&profiler_stats.max_blt_request, request,
+ sizeof(struct b2r2_blt_req));
+ memcpy(&profiler_stats.max_blt_profiling_info,
blt_profiling_info, sizeof(struct b2r2_blt_profiling_info));
}
- min_avg_max_mpix_per_second_state.accumulated_num_pixels +=
- num_pixels_in_blt;
- min_avg_max_mpix_per_second_state.accumulated_num_usecs +=
- num_usec_blt_took;
-
- min_avg_max_mpix_per_second_state.num_blts_done++;
+ profiler_stats.accumulated_num_pixels += num_pixels_in_blt;
+ profiler_stats.accumulated_num_usecs += num_usec_blt_took;
+ profiler_stats.num_blts_done++;
- if (min_avg_max_mpix_per_second_state.num_blts_done >= min_avg_max_mpix_per_second_num_blts_used) {
- print_min_avg_max_mpix_per_second_state();
- reset_min_avg_max_mpix_per_second_state();
+ if (profiler_stats.num_blts_done >= profiler_stats_blts_used) {
+ print_profiler_stats();
+ reset_profiler_stats();
/* The printouts initiated above can disturb the next measurement
so we delay it two seconds to give the printouts a chance to finish. */
- min_avg_max_mpix_per_second_state.sampling_start_time_jiffies =
- jiffies + (2 * HZ);
+ profiler_stats.sampling_start_time_jiffies = jiffies + (2 * HZ);
}
}
-static void blt_done(const struct b2r2_blt_req * const request, const s32 request_id, const struct b2r2_blt_profiling_info * const blt_profiling_info)
+static void blt_done(const struct b2r2_blt_req * const request,
+ const s32 request_id,
+ const struct b2r2_blt_profiling_info * const blt_profiling_info)
{
/* Filters */
if (src_format_filter_on && request->src_img.fmt != src_format_filter)
@@ -233,14 +245,14 @@ static void blt_done(const struct b2r2_blt_req * const request, const s32 reques
if (print_blts_on)
print_blt(request, blt_profiling_info);
- if (min_avg_max_mpix_per_second_on)
- do_min_avg_max_mpix_per_second(request, blt_profiling_info);
+ if (profiler_stats_on)
+ do_profiler_stats(request, blt_profiling_info);
}
static int __init b2r2_profiler_init(void)
{
- reset_min_avg_max_mpix_per_second_state();
+ reset_profiler_stats();
return b2r2_register_profiler(&this);
}
diff --git a/drivers/video/b2r2/b2r2_profiler_socket.c b/drivers/video/b2r2/b2r2_profiler_socket.c
index f96ab5be76e..ffa7f2870c8 100644
--- a/drivers/video/b2r2/b2r2_profiler_socket.c
+++ b/drivers/video/b2r2/b2r2_profiler_socket.c
@@ -81,12 +81,13 @@ void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request)
{
int return_value;
struct b2r2_blt_profiling_info blt_profiling_info;
+ struct b2r2_control *cont = request->instance->control;
return_value = down_interruptible(&b2r2_profiler_lock);
if (return_value != 0) {
- dev_err(b2r2_blt_device(),
- "%s: Failed to acquire semaphore, ret=%i. Lost profiler call!\n",
- __func__, return_value);
+ dev_err(cont->dev,
+ "%s: Failed to acquire semaphore, ret=%i. "
+ "Lost profiler call!\n", __func__, return_value);
return;
}
diff --git a/drivers/video/b2r2/b2r2_utils.c b/drivers/video/b2r2/b2r2_utils.c
index 87949a37b6b..3df7a272211 100644
--- a/drivers/video/b2r2/b2r2_utils.c
+++ b/drivers/video/b2r2/b2r2_utils.c
@@ -8,15 +8,14 @@
* License terms: GNU General Public License (GPL), version 2.
*/
-#include "b2r2_utils.h"
-
-#include "b2r2_debug.h"
-
-#include <video/b2r2_blt.h>
-
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <video/b2r2_blt.h>
+
+#include "b2r2_utils.h"
+#include "b2r2_debug.h"
+#include "b2r2_internal.h"
const s32 b2r2_s32_max = 2147483647;
@@ -25,18 +24,19 @@ const s32 b2r2_s32_max = 2147483647;
* calculate_scale_factor() - calculates the scale factor between the given
* values
*/
-int calculate_scale_factor(u32 from, u32 to, u16 *sf_out)
+int calculate_scale_factor(struct b2r2_control *cont,
+ u32 from, u32 to, u16 *sf_out)
{
int ret;
u32 sf;
- b2r2_log_info("%s\n", __func__);
+ b2r2_log_info(cont->dev, "%s\n", __func__);
if (to == from) {
*sf_out = 1 << 10;
return 0;
} else if (to == 0) {
- b2r2_log_err("%s: To is 0!\n", __func__);
+ b2r2_log_err(cont->dev, "%s: To is 0!\n", __func__);
BUG_ON(1);
}
@@ -44,12 +44,12 @@ int calculate_scale_factor(u32 from, u32 to, u16 *sf_out)
if ((sf & 0xffff0000) != 0) {
/* Overflow error */
- b2r2_log_warn("%s: "
+ b2r2_log_warn(cont->dev, "%s: "
"Scale factor too large\n", __func__);
ret = -EINVAL;
goto error;
} else if (sf == 0) {
- b2r2_log_warn("%s: "
+ b2r2_log_warn(cont->dev, "%s: "
"Scale factor too small\n", __func__);
ret = -EINVAL;
goto error;
@@ -57,12 +57,12 @@ int calculate_scale_factor(u32 from, u32 to, u16 *sf_out)
*sf_out = (u16)sf;
- b2r2_log_info("%s exit\n", __func__);
+ b2r2_log_info(cont->dev, "%s exit\n", __func__);
return 0;
error:
- b2r2_log_warn("%s: Exit...\n", __func__);
+ b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__);
return ret;
}
@@ -127,7 +127,8 @@ void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
* the old source rectangle corresponds to
* to the new part of old destination rectangle.
*/
-void b2r2_trim_rects(const struct b2r2_blt_req *req,
+void b2r2_trim_rects(struct b2r2_control *cont,
+ const struct b2r2_blt_req *req,
struct b2r2_blt_rect *new_bg_rect,
struct b2r2_blt_rect *new_dst_rect,
struct b2r2_blt_rect *new_src_rect)
@@ -149,10 +150,12 @@ void b2r2_trim_rects(const struct b2r2_blt_req *req,
s16 hsf;
s16 vsf;
- b2r2_log_info("%s\nold_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ b2r2_log_info(cont->dev,
+ "%s\nold_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
old_dst_rect->x, old_dst_rect->y,
old_dst_rect->width, old_dst_rect->height);
- b2r2_log_info("%s\nold_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ b2r2_log_info(cont->dev,
+ "%s\nold_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
old_src_rect->x, old_src_rect->y,
old_src_rect->width, old_src_rect->height);
@@ -164,7 +167,8 @@ void b2r2_trim_rects(const struct b2r2_blt_req *req,
goto keep_rects;
b2r2_intersect_rects(old_dst_rect, &dst_img_bounds, new_dst_rect);
- b2r2_log_info("%s\nnew_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ b2r2_log_info(cont->dev,
+ "%s\nnew_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
new_dst_rect->x, new_dst_rect->y,
new_dst_rect->width, new_dst_rect->height);
@@ -177,13 +181,13 @@ void b2r2_trim_rects(const struct b2r2_blt_req *req,
if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
int res = 0;
- res = calculate_scale_factor(old_src_rect->width,
+ res = calculate_scale_factor(cont, old_src_rect->width,
old_dst_rect->height, &hsf);
/* invalid dimensions, leave them to validation */
if (res < 0)
goto keep_rects;
- res = calculate_scale_factor(old_src_rect->height,
+ res = calculate_scale_factor(cont, old_src_rect->height,
old_dst_rect->width, &vsf);
if (res < 0)
goto keep_rects;
@@ -203,12 +207,12 @@ void b2r2_trim_rects(const struct b2r2_blt_req *req,
src_h = new_dst_rect->width * vsf;
} else {
int res = 0;
- res = calculate_scale_factor(old_src_rect->width,
+ res = calculate_scale_factor(cont, old_src_rect->width,
old_dst_rect->width, &hsf);
if (res < 0)
goto keep_rects;
- res = calculate_scale_factor(old_src_rect->height,
+ res = calculate_scale_factor(cont, old_src_rect->height,
old_dst_rect->height, &vsf);
if (res < 0)
goto keep_rects;
@@ -266,7 +270,8 @@ void b2r2_trim_rects(const struct b2r2_blt_req *req,
new_src_rect->width = src_w;
new_src_rect->height = src_h;
- b2r2_log_info("%s\nnew_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
+ b2r2_log_info(cont->dev,
+ "%s\nnew_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__,
new_src_rect->x, new_src_rect->y,
new_src_rect->width, new_src_rect->height);
@@ -274,14 +279,16 @@ void b2r2_trim_rects(const struct b2r2_blt_req *req,
/* Modify bg_rect in the same way as dst_rect */
s32 dw = new_dst_rect->width - old_dst_rect->width;
s32 dh = new_dst_rect->height - old_dst_rect->height;
- b2r2_log_info("%s\nold bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ b2r2_log_info(cont->dev,
+ "%s\nold bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
__func__, old_bg_rect->x, old_bg_rect->y,
old_bg_rect->width, old_bg_rect->height);
new_bg_rect->x = old_bg_rect->x + dx;
new_bg_rect->y = old_bg_rect->y + dy;
new_bg_rect->width = old_bg_rect->width + dw;
new_bg_rect->height = old_bg_rect->height + dh;
- b2r2_log_info("%s\nnew bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
+ b2r2_log_info(cont->dev,
+ "%s\nnew bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n",
__func__, new_bg_rect->x, new_bg_rect->y,
new_bg_rect->width, new_bg_rect->height);
}
@@ -294,11 +301,11 @@ keep_rects:
*new_src_rect = *old_src_rect;
*new_dst_rect = *old_dst_rect;
*new_bg_rect = *old_bg_rect;
- b2r2_log_info("%s original rectangles preserved.\n", __func__);
+ b2r2_log_info(cont->dev, "%s original rectangles preserved.\n", __func__);
return;
}
-int b2r2_get_fmt_bpp(enum b2r2_blt_fmt fmt)
+int b2r2_get_fmt_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt)
{
/*
* Currently this function is not used that often but if that changes a
@@ -344,13 +351,14 @@ int b2r2_get_fmt_bpp(enum b2r2_blt_fmt fmt)
return 32;
default:
- b2r2_log_err("%s: Internal error! Format %#x not recognized.\n",
+ b2r2_log_err(cont->dev,
+ "%s: Internal error! Format %#x not recognized.\n",
__func__, fmt);
return 32;
}
}
-int b2r2_get_fmt_y_bpp(enum b2r2_blt_fmt fmt)
+int b2r2_get_fmt_y_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt)
{
switch (fmt) {
case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
@@ -373,7 +381,8 @@ int b2r2_get_fmt_y_bpp(enum b2r2_blt_fmt fmt)
return 8;
default:
- b2r2_log_err("%s: Internal error! Non YCbCr format supplied.\n",
+ b2r2_log_err(cont->dev,
+ "%s: Internal error! Non YCbCr format supplied.\n",
__func__);
return 8;
}
@@ -533,39 +542,40 @@ bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt)
}
}
-u32 b2r2_calc_pitch_from_width(s32 width, enum b2r2_blt_fmt fmt)
+u32 b2r2_calc_pitch_from_width(struct b2r2_control *cont,
+ s32 width, enum b2r2_blt_fmt fmt)
{
if (b2r2_is_single_plane_fmt(fmt)) {
return (u32)b2r2_div_round_up(width *
- b2r2_get_fmt_bpp(fmt), 8);
+ b2r2_get_fmt_bpp(cont, fmt), 8);
} else if (b2r2_is_ycbcrsp_fmt(fmt) || b2r2_is_ycbcrp_fmt(fmt)) {
return (u32)b2r2_div_round_up(width *
- b2r2_get_fmt_y_bpp(fmt), 8);
+ b2r2_get_fmt_y_bpp(cont, fmt), 8);
} else {
- b2r2_log_err("%s: Internal error! "
- "Pitchless format supplied.\n",
- __func__);
+ b2r2_log_err(cont->dev, "%s: Internal error! "
+ "Pitchless format supplied.\n",
+ __func__);
return 0;
}
}
-u32 b2r2_get_img_pitch(struct b2r2_blt_img *img)
+u32 b2r2_get_img_pitch(struct b2r2_control *cont, struct b2r2_blt_img *img)
{
if (img->pitch != 0)
return img->pitch;
else
- return b2r2_calc_pitch_from_width(img->width, img->fmt);
+ return b2r2_calc_pitch_from_width(cont, img->width, img->fmt);
}
-s32 b2r2_get_img_size(struct b2r2_blt_img *img)
+s32 b2r2_get_img_size(struct b2r2_control *cont, struct b2r2_blt_img *img)
{
if (b2r2_is_single_plane_fmt(img->fmt)) {
- return (s32)b2r2_get_img_pitch(img) * img->height;
+ return (s32)b2r2_get_img_pitch(cont, img) * img->height;
} else if (b2r2_is_ycbcrsp_fmt(img->fmt) ||
b2r2_is_ycbcrp_fmt(img->fmt)) {
s32 y_plane_size;
- y_plane_size = (s32)b2r2_get_img_pitch(img) * img->height;
+ y_plane_size = (s32)b2r2_get_img_pitch(cont, img) * img->height;
if (b2r2_is_ycbcr420_fmt(img->fmt)) {
return y_plane_size + y_plane_size / 2;
@@ -574,18 +584,18 @@ s32 b2r2_get_img_size(struct b2r2_blt_img *img)
} else if (b2r2_is_ycbcr444_fmt(img->fmt)) {
return y_plane_size * 3;
} else {
- b2r2_log_err("%s: Internal error! "
- "Format %#x not recognized.\n",
- __func__, img->fmt);
+ b2r2_log_err(cont->dev, "%s: Internal error!"
+ " Format %#x not recognized.\n",
+ __func__, img->fmt);
return 0;
}
} else if (b2r2_is_mb_fmt(img->fmt)) {
return (img->width * img->height *
- b2r2_get_fmt_bpp(img->fmt)) / 8;
+ b2r2_get_fmt_bpp(cont, img->fmt)) / 8;
} else {
- b2r2_log_err("%s: Internal error! "
- "Format %#x not recognized.\n",
- __func__, img->fmt);
+ b2r2_log_err(cont->dev, "%s: Internal error! "
+ "Format %#x not recognized.\n",
+ __func__, img->fmt);
return 0;
}
}
diff --git a/drivers/video/b2r2/b2r2_utils.h b/drivers/video/b2r2/b2r2_utils.h
index 4597b2bd684..0516447b42f 100644
--- a/drivers/video/b2r2/b2r2_utils.h
+++ b/drivers/video/b2r2/b2r2_utils.h
@@ -13,9 +13,12 @@
#include <video/b2r2_blt.h>
+#include "b2r2_internal.h"
+
extern const s32 b2r2_s32_max;
-int calculate_scale_factor(u32 from, u32 to, u16 *sf_out);
+int calculate_scale_factor(struct b2r2_control *cont,
+ u32 from, u32 to, u16 *sf_out);
void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
struct b2r2_blt_rect *bounding_rect);
@@ -27,13 +30,14 @@ bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
struct b2r2_blt_rect *rect2,
struct b2r2_blt_rect *intersection);
-void b2r2_trim_rects(const struct b2r2_blt_req *req,
+void b2r2_trim_rects(struct b2r2_control *cont,
+ const struct b2r2_blt_req *req,
struct b2r2_blt_rect *new_bg_rect,
struct b2r2_blt_rect *new_dst_rect,
struct b2r2_blt_rect *new_src_rect);
-int b2r2_get_fmt_bpp(enum b2r2_blt_fmt fmt);
-int b2r2_get_fmt_y_bpp(enum b2r2_blt_fmt fmt);
+int b2r2_get_fmt_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt);
+int b2r2_get_fmt_y_bpp(struct b2r2_control *cont, enum b2r2_blt_fmt fmt);
bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt);
@@ -48,9 +52,12 @@ bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt);
/*
* Rounds up if an invalid width causes the pitch to be non byte aligned.
*/
-u32 b2r2_calc_pitch_from_width(s32 width, enum b2r2_blt_fmt fmt);
-u32 b2r2_get_img_pitch(struct b2r2_blt_img *img);
-s32 b2r2_get_img_size(struct b2r2_blt_img *img);
+u32 b2r2_calc_pitch_from_width(struct b2r2_control *cont,
+ s32 width, enum b2r2_blt_fmt fmt);
+u32 b2r2_get_img_pitch(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
+s32 b2r2_get_img_size(struct b2r2_control *cont,
+ struct b2r2_blt_img *img);
s32 b2r2_div_round_up(s32 dividend, s32 divisor);
bool b2r2_is_aligned(s32 value, s32 alignment);