author     Fredrik Allansson <fredrik.allansson@stericsson.com>   2010-05-27 16:20:10 +0200
committer  John Rigby <john.rigby@linaro.org>                     2010-09-02 22:45:30 -0600
commit     63cfdf397fd4502644ea06bcb764e9c4179f7cd3 (patch)
tree       5f62cfa7e62431b447400e2469cf18da72bea428 /drivers/video
parent     8249f2c124f14f2467a1033146385497008b3adf (diff)
B2R2: Changed behaviour of log prints.
The use of printk for error and debug prints has been changed to a set of
log macros. The verbosity of the log prints can now be controlled through
debugfs. If the kernel is compiled with support for dynamic debug
(Documentation/dynamic-debug-howto.txt), this will be used instead of the
debugfs files.

ST-Ericsson Change ID: ER261401

Signed-off-by: Fredrik Allansson <fredrik.allansson@stericsson.com>
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/502
Reviewed-by: Robert LIND <robert.lind@stericsson.com>
Reviewed-by: Robert FEKETE <robert.fekete@stericsson.com>
Tested-by: Robert FEKETE <robert.fekete@stericsson.com>
Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
Change-Id: Ifbbc09525636ef396d4814ce21b61861983fbbe0
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/2377
Reviewed-by: Jonas ABERG <jonas.aberg@stericsson.com>
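
For readers skimming this commit, here is a minimal, self-contained sketch (plain kernel C, not part of the patch) of the pattern the message describes: a flag exposed through debugfs gates a dev_dbg() call, so verbosity can be changed at run time without rebuilding. Every identifier below (mydrv_*) is hypothetical; the real levels and macros are the b2r2_debug.c / b2r2_debug.h files added further down in this diff, which also fall back to dynamic debug when CONFIG_DYNAMIC_DEBUG is set. Note that dev_dbg() itself only emits output when DEBUG or dynamic debug is enabled, which is why the Makefile change below adds -DDEBUG.

    #include <linux/debugfs.h>
    #include <linux/device.h>
    #include <linux/module.h>

    static u32 mydrv_info_enabled;        /* 0/1, written through debugfs */
    static struct dentry *mydrv_dbg_dir;
    static struct device *mydrv_log_dev;  /* would be set at probe time */

    /* Gate on both the level flag and a valid device before printing. */
    #define mydrv_log_info(...)                                        \
            do {                                                       \
                    if (mydrv_info_enabled && mydrv_log_dev)           \
                            dev_dbg(mydrv_log_dev, __VA_ARGS__);       \
            } while (0)

    static int __init mydrv_debug_init(void)
    {
            /* One debugfs file per log level; "echo 1 > .../info" enables it. */
            mydrv_dbg_dir = debugfs_create_dir("mydrv_debug", NULL);
            debugfs_create_u32("info", 0644, mydrv_dbg_dir,
                               &mydrv_info_enabled);

            /* Exercise the macro once; a no-op until the flag is set. */
            mydrv_log_info("%s: logging initialised\n", __func__);
            return 0;
    }

    static void __exit mydrv_debug_exit(void)
    {
            debugfs_remove_recursive(mydrv_dbg_dir);
    }

    module_init(mydrv_debug_init);
    module_exit(mydrv_debug_exit);
    MODULE_LICENSE("GPL");

    /*
     * A call site then changes from, e.g.:
     *      printk(KERN_INFO "b2r2_blt=%d Going generic.\n", ret);
     * to:
     *      mydrv_log_info("b2r2_blt=%d Going generic.\n", ret);
     */
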
Diffstat (limited to 'drivers/video')
-rw-r--r--   drivers/video/b2r2/Makefile            14
-rw-r--r--   drivers/video/b2r2/b2r2_blt_main.c    342
-rw-r--r--   drivers/video/b2r2/b2r2_core.c        165
-rw-r--r--   drivers/video/b2r2/b2r2_debug.c        62
-rw-r--r--   drivers/video/b2r2/b2r2_debug.h        81
-rw-r--r--   drivers/video/b2r2/b2r2_generic.c      97
-rw-r--r--   drivers/video/b2r2/b2r2_internal.h     10
-rw-r--r--   drivers/video/b2r2/b2r2_node_split.c  213
8 files changed, 554 insertions, 430 deletions
diff --git a/drivers/video/b2r2/Makefile b/drivers/video/b2r2/Makefile
index d368a60a10e..b0c9b374b31 100644
--- a/drivers/video/b2r2/Makefile
+++ b/drivers/video/b2r2/Makefile
@@ -1,15 +1,25 @@
# Make file for compiling and loadable module B2R2
+# TODO: This should be moved to Kconfig
+CONFIG_B2R2_DEBUG := y
+
# Fix for hardware bug requiring rectangles to be a multiple of 16 high
EXTRA_CFLAGS += -DB2R2_ROTATION_HEIGHT_BUGFIX
+# Enable fallback to generic
+EXTRA_CFLAGS += -DB2R2_GEN_OPT_MIX
+
obj-$(CONFIG_FB_B2R2) += b2r2.o
b2r2-objs = b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o b2r2_generic.o b2r2_node_gen.o b2r2_node_split.o b2r2_profiler_socket.o b2r2_timing.o
+ifdef CONFIG_B2R2_DEBUG
+EXTRA_CFLAGS += -DCONFIG_B2R2_DEBUG -DDEBUG
+b2r2-objs += b2r2_debug.o
+endif
+
ifeq ($(CONFIG_FB_B2R2),m)
obj-y += b2r2_kernel_if.o
endif
-clean-files := b2r2.o b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o b2r2_generic.o b2r2_node_gen.o b2r2_node_split.o b2r2_profiler_socket.o b2r2_timing.o modules.order built-in.o
-
+clean-files := b2r2.o b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o b2r2_generic.o b2r2_node_gen.o b2r2_node_split.o b2r2_profiler_socket.o b2r2_timing.o b2r2_debug.o modules.order built-in.o
diff --git a/drivers/video/b2r2/b2r2_blt_main.c b/drivers/video/b2r2/b2r2_blt_main.c
index 45ae36cc50d..e2d3685d8c4 100644
--- a/drivers/video/b2r2/b2r2_blt_main.c
+++ b/drivers/video/b2r2/b2r2_blt_main.c
@@ -39,7 +39,6 @@
#include <linux/smp.h>
#include <linux/dma-mapping.h>
-#include <asm/dma-mapping.h>
#include <linux/sched.h>
#include <linux/err.h>
@@ -50,14 +49,11 @@
#include "b2r2_mem_alloc.h"
#include "b2r2_profiler_socket.h"
#include "b2r2_timing.h"
+#include "b2r2_debug.h"
-#define B2R2_HEAP_SIZE 4 * PAGE_SIZE
+#define B2R2_HEAP_SIZE (4 * PAGE_SIZE)
static u32 b2r2_heap_size = 31 * PAGE_SIZE;
-//#define B2R2_GENERIC_BLT
-//#define B2R2_OPT_BLT
-#define B2R2_GEN_OPT_MIX
-
/*
* TODO:
* Implementation of query cap
@@ -68,13 +64,6 @@ static u32 b2r2_heap_size = 31 * PAGE_SIZE;
* Support read of many report records at once.
*/
-
-#ifndef CONFIG_FB
-/* Dummy definition of extern variables */
-struct fb_info *registered_fb[FB_MAX];
-int num_registered_fb = 0;
-#endif
-
/**
* b2r2_blt_dev - Our device, /dev/b2r2_blt
*/
@@ -272,7 +261,7 @@ static int b2r2_blt_open(struct inode *inode, struct file *filp)
int ret = 0;
struct b2r2_blt_instance *instance;
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
inc_stat(&stat_n_in_open);
@@ -280,7 +269,7 @@ static int b2r2_blt_open(struct inode *inode, struct file *filp)
instance = (struct b2r2_blt_instance *)
kmalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
- dev_err(b2r2_blt_device(), "%s: Failed to alloc\n", __func__);
+ b2r2_log_err("%s: Failed to alloc\n", __func__);
goto instance_alloc_failed;
}
memset(instance, 0, sizeof(*instance));
@@ -316,7 +305,7 @@ static int b2r2_blt_release(struct inode *inode, struct file *filp)
int ret;
struct b2r2_blt_instance *instance;
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
inc_stat(&stat_n_in_release);
@@ -325,14 +314,14 @@ static int b2r2_blt_release(struct inode *inode, struct file *filp)
/* Finish all outstanding requests */
ret = b2r2_blt_synch(instance, 0);
if (ret < 0)
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: b2r2_blt_sync failed with %d\n", __func__, ret);
/* Now cancel any remaining outstanding request */
if (instance->no_of_active_requests) {
struct b2r2_core_job *job;
- dev_warn(b2r2_blt_device(), "%s: %d active requests\n",
+ b2r2_log_warn("%s: %d active requests\n",
__func__, instance->no_of_active_requests);
/* Find and cancel all jobs belonging to us */
@@ -345,7 +334,7 @@ static int b2r2_blt_release(struct inode *inode, struct file *filp)
job = b2r2_core_job_find_first_with_tag((int) instance);
}
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: %d active requests after cancel\n",
__func__, instance->no_of_active_requests);
}
@@ -392,7 +381,7 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
/** Process actual ioctl */
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Get the instance from the file structure */
instance = (struct b2r2_blt_instance *) file->private_data;
@@ -405,7 +394,7 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
struct b2r2_blt_request *request =
kmalloc(sizeof(*request), GFP_KERNEL);
if (!request) {
- dev_err(b2r2_blt_device(), "%s: Failed to alloc mem\n",
+ b2r2_log_err("%s: Failed to alloc mem\n",
__func__);
return -ENOMEM;
}
@@ -421,7 +410,7 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
/* Get the user data */
if (copy_from_user(&request->user_req, (void *)arg,
sizeof(request->user_req))) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_err(
"%s: copy_from_user failed\n",
__func__);
kfree(request);
@@ -437,12 +426,12 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
ret = b2r2_blt(instance, request);
#elif defined(B2R2_GEN_OPT_MIX)
ret = b2r2_blt(instance, request);
- if (ret < 0) {
+ if (ret == -ENOSYS) {
struct b2r2_blt_request *request_gen;
- printk(KERN_INFO "\nb2r2_blt=%d Going generic.\n", ret);
+ b2r2_log_info("b2r2_blt=%d Going generic.\n", ret);
request_gen = kmalloc(sizeof(*request_gen), GFP_KERNEL);
if (!request_gen) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_err(
"%s: Failed to alloc mem for request_gen\n", __func__);
return -ENOMEM;
}
@@ -458,7 +447,7 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
/* Get the user data */
if (copy_from_user(&request_gen->user_req, (void *)arg,
sizeof(request_gen->user_req))) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_err(
"%s: copy_from_user failed\n",
__func__);
kfree(request_gen);
@@ -468,7 +457,7 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
request_gen->profile = is_profiler_registered_approx();
ret = b2r2_generic_blt(instance, request_gen);
- printk(KERN_INFO "\nb2r2_generic_blt=%d Generic done.\n", ret);
+ b2r2_log_info("\nb2r2_generic_blt=%d Generic done.\n", ret);
}
#endif
break;
@@ -491,7 +480,7 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
/* Get the user data */
if (copy_from_user(&query_cap, (void *)arg,
sizeof(query_cap))) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_err(
"%s: copy_from_user failed\n",
__func__);
return -EFAULT;
@@ -503,7 +492,7 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
/* Return data to user */
if (copy_to_user((void *)arg, &query_cap,
sizeof(query_cap))) {
- dev_err(b2r2_blt_device(), "%s: copy_to_user failed\n",
+ b2r2_log_err("%s: copy_to_user failed\n",
__func__);
return -EFAULT;
}
@@ -512,13 +501,16 @@ static int b2r2_blt_ioctl(struct inode *inode, struct file *file,
default:
/* Unknown command */
- dev_err(b2r2_blt_device(),
+ b2r2_log_err(
"%s: Unknown cmd %d\n", __func__, cmd);
ret = -EINVAL;
break;
}
+ if (ret < 0)
+ b2r2_log_err("EC %d\n", -ret);
+
return ret;
}
@@ -536,7 +528,7 @@ static unsigned b2r2_blt_poll(struct file *filp, poll_table *wait)
struct b2r2_blt_instance *instance;
unsigned int mask = 0;
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Get the instance from the file structure */
instance = (struct b2r2_blt_instance *) filp->private_data;
@@ -568,7 +560,7 @@ static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
struct b2r2_blt_request *request;
struct b2r2_blt_report report;
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Get the instance from the file structure */
instance = (struct b2r2_blt_instance *) filp->private_data;
@@ -592,7 +584,7 @@ static ssize_t b2r2_blt_read(struct file *filp, char __user *buf, size_t count,
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- dev_dbg(b2r2_blt_device(), "%s - Going to sleep\n", __func__);
+ b2r2_log_info("%s - Going to sleep\n", __func__);
if (wait_event_interruptible(
instance->report_list_waitq,
!is_report_list_empty(instance)))
@@ -686,13 +678,13 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
thread_runtime_at_start = (u32)task_sched_runtime(current);
}
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
inc_stat(&stat_n_in_blt);
/* Debug prints of incoming request */
- dev_dbg(b2r2_blt_device(),
- "src.fmt=%d src.buf={%d,%d,%d} "
+ b2r2_log_info(
+ "src.fmt=%#010x src.buf={%d,%d,%d} "
"src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
request->user_req.src_img.fmt,
request->user_req.src_img.buf.type,
@@ -704,8 +696,8 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->user_req.src_rect.y,
request->user_req.src_rect.width,
request->user_req.src_rect.height);
- dev_dbg(b2r2_blt_device(),
- "dst.fmt=%d dst.buf={%d,%d,%d} "
+ b2r2_log_info(
+ "dst.fmt=%#010x dst.buf={%d,%d,%d} "
"dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n",
request->user_req.dst_img.fmt,
request->user_req.dst_img.buf.type,
@@ -750,7 +742,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
ret = resolve_buf(&request->user_req.src_img.buf,
&request->src_resolved);
if (ret < 0) {
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Resolve src buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -761,7 +753,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
ret = resolve_buf(&request->user_req.src_mask.buf,
&request->src_mask_resolved);
if (ret < 0) {
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Resolve src mask buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -772,7 +764,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
ret = resolve_buf(&request->user_req.dst_img.buf,
&request->dst_resolved);
if (ret < 0) {
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Resolve dst buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -780,7 +772,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
}
/* Debug prints of resolved buffers */
- dev_dbg(b2r2_blt_device(), "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info("src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->src_resolved.physical_address,
request->src_resolved.virtual_address,
request->src_resolved.is_pmem,
@@ -789,7 +781,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->src_resolved.file_virtual_start,
request->src_resolved.file_len);
- dev_dbg(b2r2_blt_device(), "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info("dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->dst_resolved.physical_address,
request->dst_resolved.virtual_address,
request->dst_resolved.is_pmem,
@@ -804,12 +796,12 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
&request->node_split_job);
if (ret == -ENOSYS) {
/* There was no optimized path for this request */
- dev_dbg(b2r2_blt_device(),
+ b2r2_log_info(
"%s: No optimized path for request\n", __func__);
goto no_optimized_path;
} else if (ret < 0) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to analyze request, ret = %d\n",
__func__, ret);
#ifdef CONFIG_DEBUG_FS
@@ -817,14 +809,14 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
/*Failed, dump job to dmesg */
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- dev_info(b2r2_blt_device(),
+ b2r2_log_info(
"%s: Analyze failed for:\n", __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- dev_info(b2r2_blt_device(), "%s", Buf);
+ b2r2_log_info("%s", Buf);
kfree(Buf);
} else {
- dev_info(b2r2_blt_device(), "Unable to print the request. "
+ b2r2_log_info("Unable to print the request. "
"Message buffer allocation failed.\n");
}
}
@@ -836,7 +828,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
#ifdef B2R2_USE_NODE_GEN
request->first_node = b2r2_blt_alloc_nodes(node_count);
if (request->first_node == NULL) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to allocate nodes, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
@@ -844,7 +836,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
#else
ret = b2r2_node_alloc(node_count, &(request->first_node));
if (ret < 0 || request->first_node == NULL) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to allocate nodes, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
@@ -856,7 +848,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
request->first_node);
if (ret < 0) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to perform node split, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
@@ -906,7 +898,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
#endif
/* Submit the job */
- dev_dbg(b2r2_blt_device(), "%s: Submitting job\n", __func__);
+ b2r2_log_info("%s: Submitting job\n", __func__);
inc_stat(&stat_n_in_blt_add);
@@ -923,7 +915,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
dec_stat(&stat_n_in_blt_add);
if (request_id < 0) {
- dev_warn(b2r2_blt_device(), "%s: Failed to add job, ret = %d\n",
+ b2r2_log_warn("%s: Failed to add job, ret = %d\n",
__func__, request_id);
ret = request_id;
spin_unlock(&instance->lock);
@@ -937,7 +929,7 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
/* Wait for the job to be done if synchronous */
if ((request->user_req.flags & B2R2_BLT_FLAG_ASYNCH) == 0) {
- dev_dbg(b2r2_blt_device(), "%s: Synchronous, waiting\n",
+ b2r2_log_info("%s: Synchronous, waiting\n",
__func__);
inc_stat(&stat_n_in_blt_wait);
@@ -947,11 +939,11 @@ static int b2r2_blt(struct b2r2_blt_instance *instance,
dec_stat(&stat_n_in_blt_wait);
if (ret < 0 && ret != -ENOENT)
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to wait job, ret = %d\n",
__func__, ret);
else
- dev_dbg(b2r2_blt_device(),
+ b2r2_log_info(
"%s: Synchronous wait done\n", __func__);
ret = 0;
}
@@ -990,7 +982,7 @@ wrong_size:
job_release(&request->job);
dec_stat(&stat_n_jobs_released);
if ((request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) == 0 || ret)
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s returns with error %d\n", __func__, ret);
dec_stat(&stat_n_in_blt);
@@ -1007,7 +999,7 @@ wrong_size:
static void tile_job_callback_gen(struct b2r2_core_job *job)
{
if (b2r2_blt_device())
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Local addref / release within this func */
b2r2_core_job_addref(job, __func__);
@@ -1015,7 +1007,7 @@ static void tile_job_callback_gen(struct b2r2_core_job *job)
#ifdef CONFIG_DEBUG_FS
/* Notify if a tile job is cancelled */
if (job->job_state == B2R2_CORE_JOB_CANCELED) {
- dev_info(b2r2_blt_device(), "%s: Tile job cancelled:\n", __func__);
+ b2r2_log_info("%s: Tile job cancelled:\n", __func__);
}
#endif
@@ -1036,7 +1028,7 @@ static void job_callback_gen(struct b2r2_core_job *job)
container_of(job, struct b2r2_blt_request, job);
if (b2r2_blt_device())
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Local addref / release within this func */
b2r2_core_job_addref(job, __func__);
@@ -1079,13 +1071,13 @@ static void job_callback_gen(struct b2r2_core_job *job)
if (job->job_state == B2R2_CORE_JOB_CANCELED) {
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- dev_info(b2r2_blt_device(), "%s: Job cancelled:\n", __func__);
+ b2r2_log_info("%s: Job cancelled:\n", __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- dev_info(b2r2_blt_device(), "%s", Buf);
+ b2r2_log_info("%s", Buf);
kfree(Buf);
} else {
- dev_info(b2r2_blt_device(), "Unable to print the request. "
+ b2r2_log_info("Unable to print the request. "
"Message buffer allocation failed.\n");
}
}
@@ -1107,7 +1099,7 @@ static void tile_job_release_gen(struct b2r2_core_job *job)
{
inc_stat(&stat_n_jobs_released);
- dev_dbg(b2r2_blt_device(), "%s, first_node_address=0x%.8x, ref_count=%d\n",
+ b2r2_log_info("%s, first_node_address=0x%.8x, ref_count=%d\n",
__func__, job->first_node_address, job->ref_count);
/* Release memory for the job */
@@ -1127,7 +1119,7 @@ static void job_release_gen(struct b2r2_core_job *job)
inc_stat(&stat_n_jobs_released);
- dev_dbg(b2r2_blt_device(), "%s, first_node=%p, ref_count=%d\n",
+ b2r2_log_info("%s, first_node=%p, ref_count=%d\n",
__func__, request->first_node, request->job.ref_count);
/* Free nodes */
@@ -1188,13 +1180,13 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
memset(work_bufs, 0, sizeof(work_bufs));
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
inc_stat(&stat_n_in_blt);
/* Debug prints of incoming request */
- dev_dbg(b2r2_blt_device(),
- "src.fmt=%d flags=0x%.8x src.buf={%d,%d,0x%.8x}\n"
+ b2r2_log_info(
+ "src.fmt=%#010x flags=0x%.8x src.buf={%d,%d,0x%.8x}\n"
"src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n",
request->user_req.src_img.fmt,
request->user_req.flags,
@@ -1207,8 +1199,8 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->user_req.src_rect.y,
request->user_req.src_rect.width,
request->user_req.src_rect.height);
- dev_dbg(b2r2_blt_device(),
- "dst.fmt=%d dst.buf={%d,%d,0x%.8x}\n"
+ b2r2_log_info(
+ "dst.fmt=%#010x dst.buf={%d,%d,0x%.8x}\n"
"dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n"
"dst_clip_rect={%d,%d,%d,%d}\n",
request->user_req.dst_img.fmt,
@@ -1228,7 +1220,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/* Check structure size (sanity check) */
if (request->user_req.size != sizeof(request->user_req)) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Wrong request size %d, should be %d\n",
__func__, request->user_req.size,
sizeof(request->user_req));
@@ -1242,7 +1234,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
ret = wait_event_interruptible(instance->synch_done_waitq,
!is_synching(instance));
if (ret) {
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Sync wait interrupted, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -1258,7 +1250,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
ret = resolve_buf(&request->user_req.src_img.buf,
&request->src_resolved);
if (ret < 0) {
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Resolve src buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -1269,7 +1261,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
ret = resolve_buf(&request->user_req.src_mask.buf,
&request->src_mask_resolved);
if (ret < 0) {
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Resolve src mask buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -1280,7 +1272,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
ret = resolve_buf(&request->user_req.dst_img.buf,
&request->dst_resolved);
if (ret < 0) {
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Resolve dst buf failed, %d\n",
__func__, ret);
ret = -EAGAIN;
@@ -1288,7 +1280,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
}
/* Debug prints of resolved buffers */
- dev_dbg(b2r2_blt_device(), "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info("src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->src_resolved.physical_address,
request->src_resolved.virtual_address,
request->src_resolved.is_pmem,
@@ -1297,7 +1289,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->src_resolved.file_virtual_start,
request->src_resolved.file_len);
- dev_dbg(b2r2_blt_device(), "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
+ b2r2_log_info("dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n",
request->dst_resolved.physical_address,
request->dst_resolved.virtual_address,
request->dst_resolved.is_pmem,
@@ -1310,7 +1302,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
ret = b2r2_generic_analyze(request, &tmp_buf_width,
&tmp_buf_height, &tmp_buf_count, &node_count);
if (ret < 0) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to analyze request, ret = %d\n",
__func__, ret);
#ifdef CONFIG_DEBUG_FS
@@ -1318,14 +1310,14 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/*Failed, dump job to dmesg */
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- dev_info(b2r2_blt_device(),
+ b2r2_log_info(
"%s: Analyze failed for:\n", __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- dev_info(b2r2_blt_device(), "%s", Buf);
+ b2r2_log_info("%s", Buf);
kfree(Buf);
} else {
- dev_info(b2r2_blt_device(), "Unable to print the request. "
+ b2r2_log_info("Unable to print the request. "
"Message buffer allocation failed.\n");
}
}
@@ -1337,7 +1329,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
#ifdef B2R2_USE_NODE_GEN
request->first_node = b2r2_blt_alloc_nodes(node_count);
if (request->first_node == NULL) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to allocate nodes, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
@@ -1345,7 +1337,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
#else
ret = b2r2_node_alloc(node_count, &(request->first_node));
if (ret < 0 || request->first_node == NULL) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to allocate nodes, ret = %d\n",
__func__, ret);
goto generate_nodes_failed;
@@ -1357,7 +1349,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
void *virt;
work_bufs[i].size = tmp_buf_width * tmp_buf_height * 4;
- /*printk(KERN_INFO "b2r2::%s: allocating %d bytes\n", __func__,
+ /*b2r2_log_info("%s: allocating %d bytes\n", __func__,
work_bufs[i].size);*/
virt = dma_alloc_coherent(b2r2_blt_device(),
@@ -1372,7 +1364,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
work_bufs[i].virt_addr = virt;
memset(work_bufs[i].virt_addr, 0xff, work_bufs[i].size);
- /*printk(KERN_INFO "b2r2::%s: phys=0x%.8x, virt=%p\n", __func__,
+ /*b2r2_log_info("%s: phys=0x%.8x, virt=%p\n", __func__,
work_bufs[i].phys_addr,
work_bufs[i].virt_addr);*/
}
@@ -1380,7 +1372,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
request->first_node, &work_bufs[0], tmp_buf_count);
if (ret < 0) {
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to perform generic configure, ret = %d\n",
__func__, ret);
goto generic_conf_failed;
@@ -1462,14 +1454,16 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/* Skip this tile. Do not abort, just hope for better luck
* with rest of the tiles. Memory might become available.
*/
- dev_dbg(b2r2_blt_device(), "%s: Failed to alloc job. "
+ b2r2_log_info("%s: Failed to alloc job. "
"Skipping tile at (x, y)=(%d, %d)\n", __func__, x, y);
continue;
}
tile_job->tag = request->job.tag;
tile_job->prio = request->job.prio;
- tile_job->first_node_address = request->job.first_node_address;
- tile_job->last_node_address = request->job.last_node_address;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
tile_job->callback = tile_job_callback_gen;
tile_job->release = tile_job_release_gen;
/* Work buffers and nodes are pre-allocated */
@@ -1484,12 +1478,13 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/* Only a part of the tile can be written */
dst_rect_tile.width = dst_rect_width - x;
}
- /* Where applicable, calculate area in src buffer that is needed
- * to generate the specified part of destination rectangle.
- */
- b2r2_generic_set_areas(request, request->first_node, &dst_rect_tile);
+ /* Where applicable, calculate area in src buffer that
+ is needed to generate the specified part of
+ destination rectangle. */
+ b2r2_generic_set_areas(request, request->first_node,
+ &dst_rect_tile);
/* Submit the job */
- dev_dbg(b2r2_blt_device(), "%s: Submitting job\n", __func__);
+ b2r2_log_info("%s: Submitting job\n", __func__);
inc_stat(&stat_n_in_blt_add);
@@ -1500,7 +1495,8 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
dec_stat(&stat_n_in_blt_add);
if (request_id < 0) {
- dev_warn(b2r2_blt_device(), "%s: Failed to add tile job, ret = %d\n",
+ b2r2_log_warn("%s: "
+ "Failed to add tile job, ret = %d\n",
__func__, request_id);
ret = request_id;
spin_unlock(&instance->lock);
@@ -1512,7 +1508,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
spin_unlock(&instance->lock);
/* Wait for the job to be done */
- dev_dbg(b2r2_blt_device(), "%s: Synchronous, waiting\n",
+ b2r2_log_info("%s: Synchronous, waiting\n",
__func__);
inc_stat(&stat_n_in_blt_wait);
@@ -1522,14 +1518,16 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
dec_stat(&stat_n_in_blt_wait);
if (ret < 0 && ret != -ENOENT)
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to wait job, ret = %d\n",
__func__, ret);
else {
- dev_dbg(b2r2_blt_device(),
- "%s: Synchronous wait done\n", __func__);
+ b2r2_log_info(
+ "%s: Synchronous wait done\n",
+ __func__);
- nsec_active_in_b2r2 += tile_job->nsec_active_in_hw;
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
}
/* Release matching the addref in b2r2_core_job_add */
b2r2_core_job_release(tile_job, __func__);
@@ -1547,15 +1545,17 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
*/
tile_job = kmalloc(sizeof(*tile_job), GFP_KERNEL);
if (tile_job == NULL) {
- /**/
- dev_dbg(b2r2_blt_device(), "%s: Failed to alloc job. "
- "Skipping tile at (x, y)=(%d, %d)\n", __func__, x, y);
+ b2r2_log_info("%s: Failed to alloc job. "
+ "Skipping tile at (x, y)=(%d, %d)\n",
+ __func__, x, y);
continue;
}
tile_job->tag = request->job.tag;
tile_job->prio = request->job.prio;
- tile_job->first_node_address = request->job.first_node_address;
- tile_job->last_node_address = request->job.last_node_address;
+ tile_job->first_node_address =
+ request->job.first_node_address;
+ tile_job->last_node_address =
+ request->job.last_node_address;
tile_job->callback = tile_job_callback_gen;
tile_job->release = tile_job_release_gen;
tile_job->acquire_resources = job_acquire_resources_gen;
@@ -1573,13 +1573,12 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
/* y is now the last row */
dst_rect_tile.y = y;
dst_rect_tile.height = dst_rect_height - y;
- b2r2_generic_set_areas(request, request->first_node, &dst_rect_tile);
+ b2r2_generic_set_areas(request, request->first_node,
+ &dst_rect_tile);
- dev_dbg(b2r2_blt_device(), "%s: Submitting job\n", __func__);
+ b2r2_log_info("%s: Submitting job\n", __func__);
inc_stat(&stat_n_in_blt_add);
- //printk("%s last row (x,y)=(%d, %d)\n", __func__, x, y);
-
spin_lock(&instance->lock);
if (x + tmp_buf_width < dst_rect_width) {
request_id = b2r2_core_job_add(tile_job);
@@ -1593,7 +1592,7 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
dec_stat(&stat_n_in_blt_add);
if (request_id < 0) {
- dev_warn(b2r2_blt_device(), "%s: Failed to add tile job, ret = %d\n",
+ b2r2_log_warn("%s: Failed to add tile job, ret = %d\n",
__func__, request_id);
ret = request_id;
spin_unlock(&instance->lock);
@@ -1603,30 +1602,33 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
inc_stat(&stat_n_jobs_added);
spin_unlock(&instance->lock);
- dev_dbg(b2r2_blt_device(), "%s: Synchronous, waiting\n",
+ b2r2_log_info("%s: Synchronous, waiting\n",
__func__);
inc_stat(&stat_n_in_blt_wait);
if (x + tmp_buf_width < dst_rect_width) {
ret = b2r2_core_job_wait(tile_job);
} else {
- /* Last tile. Wait for the job-struct from the request. */
+ /* This is the last tile. Wait for the job-struct from
+ the request. */
ret = b2r2_core_job_wait(&request->job);
}
dec_stat(&stat_n_in_blt_wait);
if (ret < 0 && ret != -ENOENT)
- dev_warn(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to wait job, ret = %d\n",
__func__, ret);
else {
- dev_dbg(b2r2_blt_device(),
+ b2r2_log_info(
"%s: Synchronous wait done\n", __func__);
if (x + tmp_buf_width < dst_rect_width)
- nsec_active_in_b2r2 += tile_job->nsec_active_in_hw;
+ nsec_active_in_b2r2 +=
+ tile_job->nsec_active_in_hw;
else
- nsec_active_in_b2r2 += request->job.nsec_active_in_hw;
+ nsec_active_in_b2r2 +=
+ request->job.nsec_active_in_hw;
}
/* Release matching the addref in b2r2_core_job_add.
@@ -1655,8 +1657,8 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
dec_stat(&stat_n_in_blt);
for (i = 0; i < tmp_buf_count; i++) {
- /*printk(KERN_INFO "b2r2::%s: freeing %d bytes\n",
- __func__, work_bufs[i].size);*/
+ b2r2_log_info("%s: freeing %d bytes\n",
+ __func__, work_bufs[i].size);
dma_free_coherent(b2r2_blt_device(),
work_bufs[i].size,
work_bufs[i].virt_addr,
@@ -1666,7 +1668,8 @@ static int b2r2_generic_blt(struct b2r2_blt_instance *instance,
if (request->profile) {
request->nsec_active_in_cpu =
- (s32)((u32)task_sched_runtime(current) - thread_runtime_at_start);
+ (s32)((u32)task_sched_runtime(current) -
+ thread_runtime_at_start);
request->total_time_nsec =
(s32)(b2r2_get_curr_nsec() - request->start_time_nsec);
request->job.nsec_active_in_hw = nsec_active_in_b2r2;
@@ -1681,10 +1684,9 @@ exit_dry_run:
generic_conf_failed:
alloc_work_bufs_failed:
for (i = 0; i < 4; i++) {
- if (work_bufs[i].virt_addr != 0)
- {
- /*printk(KERN_INFO "b2r2::%s: freeing %d bytes\n",
- __func__, work_bufs[i].size);*/
+ if (work_bufs[i].virt_addr != 0) {
+ b2r2_log_info("%s: freeing %d bytes\n",
+ __func__, work_bufs[i].size);
dma_free_coherent(b2r2_blt_device(),
work_bufs[i].size,
work_bufs[i].virt_addr,
@@ -1713,7 +1715,7 @@ wrong_size:
dec_stat(&stat_n_in_blt);
- //printk(KERN_INFO "b2r2:%s error ret=%d", __func__, ret);
+ b2r2_log_info("b2r2:%s error ret=%d", __func__, ret);
return ret;
}
@@ -1728,7 +1730,7 @@ static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
int request_id)
{
int ret = 0;
- dev_dbg(b2r2_blt_device(), "%s, request_id=%d\n", __func__, request_id);
+ b2r2_log_info("%s, request_id=%d\n", __func__, request_id);
if (request_id == 0) {
/* Wait for all requests */
@@ -1763,7 +1765,7 @@ static int b2r2_blt_synch(struct b2r2_blt_instance *instance,
dec_stat(&stat_n_in_synch_job);
}
- dev_dbg(b2r2_blt_device(),
+ b2r2_log_info(
"%s, request_id=%d, returns %d\n", __func__, request_id, ret);
return ret;
@@ -1793,7 +1795,7 @@ static void job_callback(struct b2r2_core_job *job)
container_of(job, struct b2r2_blt_request, job);
if (b2r2_blt_device())
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Local addref / release within this func */
b2r2_core_job_addref(job, __func__);
@@ -1835,13 +1837,13 @@ static void job_callback(struct b2r2_core_job *job)
if (job->job_state == B2R2_CORE_JOB_CANCELED) {
char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
- dev_info(b2r2_blt_device(), "%s: Job cancelled:\n", __func__);
+ b2r2_log_info("%s: Job cancelled:\n", __func__);
if (Buf != NULL) {
sprintf_req(request, Buf, sizeof(char) * 4096);
- dev_info(b2r2_blt_device(), "%s", Buf);
+ b2r2_log_info("%s", Buf);
kfree(Buf);
} else {
- dev_info(b2r2_blt_device(), "Unable to print the request. "
+ b2r2_log_info("Unable to print the request. "
"Message buffer allocation failed.\n");
}
}
@@ -1869,7 +1871,7 @@ static void job_release(struct b2r2_core_job *job)
inc_stat(&stat_n_jobs_released);
- dev_dbg(b2r2_blt_device(), "%s, first_node=%p, ref_count=%d\n",
+ b2r2_log_info("%s, first_node=%p, ref_count=%d\n",
__func__, request->first_node, request->job.ref_count);
b2r2_node_split_cancel(&request->node_split_job);
@@ -1903,7 +1905,7 @@ static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
int ret;
int i;
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* We do not support atomic allocations (I think...) */
if (atomic)
@@ -1912,7 +1914,7 @@ static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
for (i = 0; i < request->buf_count; i++) {
void *virt;
- dev_dbg(b2r2_blt_device(), "b2r2::%s: allocating %d bytes\n",
+ b2r2_log_info("%s: allocating %d bytes\n",
__func__, request->bufs[i].size);
virt = dma_alloc_coherent(b2r2_blt_device(),
@@ -1926,7 +1928,7 @@ static int job_acquire_resources(struct b2r2_core_job *job, bool atomic)
request->bufs[i].virt_addr = virt;
- dev_dbg(b2r2_blt_device(), "b2r2::%s: phys=%p, virt=%p\n",
+ b2r2_log_info("%s: phys=%p, virt=%p\n",
__func__, (void *)request->bufs[i].phys_addr,
request->bufs[i].virt_addr);
@@ -1957,7 +1959,7 @@ static void job_release_resources(struct b2r2_core_job *job, bool atomic)
struct b2r2_blt_request *request =
container_of(job, struct b2r2_blt_request, job);
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Early release of nodes
FIXME: If nodes are to be reused we don't want to release here */
@@ -1974,7 +1976,7 @@ static void job_release_resources(struct b2r2_core_job *job, bool atomic)
/* Free any temporary buffers */
for (i = 0; i < request->buf_count; i++) {
- dev_dbg(b2r2_blt_device(), "b2r2::%s: freeing %d bytes\n",
+ b2r2_log_info("%s: freeing %d bytes\n",
__func__, request->bufs[i].size);
dma_free_coherent(b2r2_blt_device(),
request->bufs[i].size,
@@ -2030,8 +2032,8 @@ static int resolve_buf(struct b2r2_blt_buf *buf,
/* FD + OFFSET type */
case B2R2_BLT_PTR_FD_OFFSET: {
- /* TODO: Do we need to check if the process is allowed to read/write
- (depending on if it's dst or src) to the file? */
+ /* TODO: Do we need to check if the process is allowed to
+ read/write (depending on if it's dst or src) to the file? */
struct file *file;
int put_needed;
int i;
@@ -2053,11 +2055,13 @@ static int resolve_buf(struct b2r2_blt_buf *buf,
} else
#endif
{
+ /* Will be set to 0 if a matching dev is found */
+ ret = -EINVAL;
file = fget_light(buf->fd, &put_needed);
if (file == NULL)
return -EINVAL;
-
+#ifdef CONFIG_FB
if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
/* This is a frame buffer device, find fb_info
(OK to do it like this, no locking???) */
@@ -2082,13 +2086,13 @@ static int resolve_buf(struct b2r2_blt_buf *buf,
(void *)(resolved->file_virtual_start +
buf->offset);
+ ret = 0;
break;
}
}
- if (i == num_registered_fb)
- ret = -EINVAL;
- } else
- ret = -EINVAL;
+ }
+#endif
+
fput_light(file, put_needed);
}
@@ -2102,7 +2106,7 @@ static int resolve_buf(struct b2r2_blt_buf *buf,
}
default:
- dev_err(b2r2_blt_device(),
+ b2r2_log_warn(
"%s: Failed to resolve buf type %d\n",
__func__, buf->type);
@@ -2129,11 +2133,11 @@ static bool is_mio_pmem(struct b2r2_resolved_buf *buf)
return false;
inode = buf->filep->f_dentry->d_inode;
- /* It's a bit risky identifying mio pmem on the minor number given that pmem
- device's minor numbers depends on the order in which they where created. It
- seems to work however so no problem at this time. This type of information
- should be gotten from the pmem driver directly but that is not possible with
- the current pmem driver. */
+ /* It's a bit risky identifying mio pmem on the minor number given that
+ pmem device's minor numbers depends on the order in which they where
+ created. It seems to work however so no problem at this time. This
+ type of information should be gotten from the pmem driver directly
+ but that is not possible with the current pmem driver. */
if (MISC_MAJOR == imajor(inode) && 1 == iminor(inode))
return true;
else
@@ -2145,7 +2149,8 @@ static bool is_mio_pmem(struct b2r2_resolved_buf *buf)
*
* @buf: User buffer specification
* @resolved_buf: Gathered info (physical address etc.) about buffer
- * @is_dst: true if the buffer is a destination buffer, false if the buffer is a source buffer.
+ * @is_dst: true if the buffer is a destination buffer, false if the buffer is a
+ * source buffer.
*/
static void sync_buf(struct b2r2_blt_buf *buf,
struct b2r2_resolved_buf *resolved,
@@ -2162,26 +2167,29 @@ static void sync_buf(struct b2r2_blt_buf *buf,
start_phys = resolved->physical_address;
end_phys = resolved->physical_address + buf->len;
- /* TODO: Very ugly. We should find out whether the memory is coherent in some generic way
- but cache handling will be rewritten soon so there is no use spending time on it. In the
- new design this will probably not be a problem. */
+ /* TODO: Very ugly. We should find out whether the memory is coherent in
+ some generic way but cache handling will be rewritten soon so there
+ is no use spending time on it. In the new design this will probably
+ not be a problem. */
/* Frame buffer and mio pmem memory is coherent, at least now. */
- if (!resolved->is_pmem || (!flush_mio_pmem && is_mio_pmem(resolved)))
- {
- /* Drain the write buffers as they are not always part of the coherent concept. */
+ if (!resolved->is_pmem ||
+ (!flush_mio_pmem && is_mio_pmem(resolved))) {
+ /* Drain the write buffers as they are not always part of the
+ coherent concept. */
wmb();
return;
}
- /* The virtual address to a pmem buffer is retrieved from ioremap, not sure if it's
- ok to use such an address as a kernel virtual address. When doing it at a higher
- level such as dma_map_single it triggers an error but at lower levels such as
- dmac_clean_range it seems to work, hence the low level stuff. */
+ /* The virtual address to a pmem buffer is retrieved from ioremap, not
+ sure if it's ok to use such an address as a kernel virtual address.
+ When doing it at a higher level such as dma_map_single it triggers an
+ error but at lower levels such as dmac_clean_range it seems to work,
+ hence the low level stuff. */
if (is_dst) {
- /* According to ARM's docs you must clean before invalidating (ie flush) to
- avoid loosing data. */
+ /* According to ARM's docs you must clean before invalidating
+ (ie flush) to avoid loosing data. */
/* Flush L1 cache */
#ifdef CONFIG_SMP
@@ -2301,7 +2309,7 @@ static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
"prio: %d\n",
request->user_req.transform);
dev_size += sprintf(buf + dev_size,
- "src_img.fmt: %d\n",
+ "src_img.fmt: %#010x\n",
request->user_req.src_img.fmt);
dev_size += sprintf(buf + dev_size,
"src_img.buf: {type=%d,fd=%d,offset=%d,len=%d}\n",
@@ -2315,7 +2323,7 @@ static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
request->user_req.src_img.height,
request->user_req.src_img.pitch);
dev_size += sprintf(buf + dev_size,
- "src_mask.fmt: %d\n",
+ "src_mask.fmt: %#010x\n",
request->user_req.src_mask.fmt);
dev_size += sprintf(buf + dev_size,
"src_mask.buf: {type=%d,fd=%d,offset=%d,len=%d}\n",
@@ -2339,7 +2347,7 @@ static int sprintf_req(struct b2r2_blt_request *request, char *buf, int size)
(unsigned long) request->user_req.src_color);
dev_size += sprintf(buf + dev_size,
- "dst_img.fmt: %d\n",
+ "dst_img.fmt: %#010x\n",
request->user_req.dst_img.fmt);
dev_size += sprintf(buf + dev_size,
"dst_img.buf: {type=%d,fd=%d,offset=%d,len=%d}\n",
@@ -2791,7 +2799,7 @@ int b2r2_blt_module_init(void)
b2r2_blt_misc_dev.this_device->coherent_dma_mask = 0xFFFFFFFF;
b2r2_blt_dev = &b2r2_blt_misc_dev;
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Initialize memory allocator */
/* FIXME: Should be before misc_register,
@@ -2851,7 +2859,7 @@ void b2r2_blt_module_exit(void)
}
#endif
if (b2r2_blt_dev) {
- dev_dbg(b2r2_blt_device(), "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
b2r2_mem_exit();
b2r2_blt_dev = NULL;
misc_deregister(&b2r2_blt_misc_dev);
diff --git a/drivers/video/b2r2/b2r2_core.c b/drivers/video/b2r2/b2r2_core.c
index 8007f2d82f8..1dd82038b97 100644
--- a/drivers/video/b2r2/b2r2_core.c
+++ b/drivers/video/b2r2/b2r2_core.c
@@ -63,15 +63,10 @@
#include "b2r2_internal.h"
#include "b2r2_profiler_api.h"
#include "b2r2_timing.h"
+#include "b2r2_debug.h"
#include <mach/prcmu-fw-api.h>
-/* TBD: Remove from here (split_init from b2r2_blt_main) */
-int __init b2r2_blt_module_init(void);
-void __exit b2r2_blt_module_exit(void);
-
-
-
/**
* B2R2_DRIVER_TIMEOUT_VALUE - Busy loop timeout after soft reset
*/
@@ -247,9 +242,9 @@ struct b2r2_core {
struct b2r2_memory_map *hw;
- struct clk* b2r2_clock;
+ struct clk *b2r2_clock;
- bool b2r2_clock_on;
+ bool b2r2_clock_on;
struct device *log_dev;
@@ -392,7 +387,7 @@ void printk_ar(struct b2r2_core_job *job)
i = (i + 1) % ARRAY_SIZE(b2r2_core.ar)) {
struct addref_release *ar = &b2r2_core.ar[i];
if (!job || job == ar->job)
- dev_info(b2r2_core.log_dev, "%s on %p from %s,"
+ b2r2_log_info("%s on %p from %s,"
" ref = %d\n",
ar->addref ? "addref" : "release",
ar->job, ar->caller, ar->ref_count);
@@ -412,7 +407,7 @@ static void internal_job_addref(struct b2r2_core_job *job, const char *caller)
{
u32 ref_count;
- dev_dbg(b2r2_core.log_dev, "%s (%p) (from %s)\n",
+ b2r2_log_info("%s (%p) (from %s)\n",
__func__, job, caller);
/* Sanity checks */
@@ -421,7 +416,7 @@ static void internal_job_addref(struct b2r2_core_job *job, const char *caller)
if (job->start_sentinel != START_SENTINEL ||
job->end_sentinel != END_SENTINEL ||
job->ref_count == 0 || job->ref_count > 10) {
- dev_info(b2r2_core.log_dev,
+ b2r2_log_info(
"%s: (%p) start=%X end=%X ref_count=%d\n",
__func__, job, job->start_sentinel,
job->end_sentinel, job->ref_count);
@@ -444,7 +439,7 @@ static void internal_job_addref(struct b2r2_core_job *job, const char *caller)
ar_add(job, caller, true);
#endif
- dev_dbg(b2r2_core.log_dev, "%s called from %s (%p): Ref Count is %d\n",
+ b2r2_log_info("%s called from %s (%p): Ref Count is %d\n",
__func__, caller, job, job->ref_count);
}
@@ -464,7 +459,7 @@ bool internal_job_release(struct b2r2_core_job *job, const char *caller)
u32 ref_count;
bool call_release = false;
- dev_dbg(b2r2_core.log_dev, "%s (%p) (from %s)\n",
+ b2r2_log_info("%s (%p) (from %s)\n",
__func__, job, caller);
/* Sanity checks */
@@ -473,7 +468,7 @@ bool internal_job_release(struct b2r2_core_job *job, const char *caller)
if (job->start_sentinel != START_SENTINEL ||
job->end_sentinel != END_SENTINEL ||
job->ref_count == 0 || job->ref_count > 10) {
- dev_info(b2r2_core.log_dev,
+ b2r2_log_info(
"%s: (%p) start=%X end=%X ref_count=%d\n",
__func__, job, job->start_sentinel,
job->end_sentinel, job->ref_count);
@@ -494,7 +489,7 @@ bool internal_job_release(struct b2r2_core_job *job, const char *caller)
#ifdef DEBUG_CHECK_ADDREF_RELEASE
ar_add(job, caller, false);
#endif
- dev_dbg(b2r2_core.log_dev, "%s called from %s (%p) Ref Count is %d\n",
+ b2r2_log_info("%s called from %s (%p) Ref Count is %d\n",
__func__, caller, job, ref_count);
if (!ref_count && job->release) {
@@ -537,7 +532,7 @@ int b2r2_core_job_add(struct b2r2_core_job *job)
{
unsigned long flags;
- dev_dbg(b2r2_core.log_dev, "%s (%p)\n", __func__, job);
+ b2r2_log_info("%s (%p)\n", __func__, job);
spin_lock_irqsave(&b2r2_core.lock, flags);
b2r2_core.stat_n_jobs_added++;
@@ -564,7 +559,7 @@ struct b2r2_core_job *b2r2_core_job_find(int job_id)
unsigned long flags;
struct b2r2_core_job *job;
- dev_dbg(b2r2_core.log_dev, "%s (%d)\n", __func__, job_id);
+ b2r2_log_info("%s (%d)\n", __func__, job_id);
spin_lock_irqsave(&b2r2_core.lock, flags);
/* Look through prio queue */
@@ -584,7 +579,7 @@ struct b2r2_core_job *b2r2_core_job_find_first_with_tag(int tag)
unsigned long flags;
struct b2r2_core_job *job;
- dev_dbg(b2r2_core.log_dev, "%s (%d)\n", __func__, tag);
+ b2r2_log_info("%s (%d)\n", __func__, tag);
spin_lock_irqsave(&b2r2_core.lock, flags);
/* Look through prio queue */
@@ -626,11 +621,11 @@ int b2r2_core_job_wait(struct b2r2_core_job *job)
{
int ret = 0;
- dev_dbg(b2r2_core.log_dev, "%s (%p)\n", __func__, job);
+ b2r2_log_info("%s (%p)\n", __func__, job);
/* Check that we have the job */
if (job->job_state == B2R2_CORE_JOB_IDLE) {
/* Never or not queued */
- dev_dbg(b2r2_core.log_dev, "%s: Job not queued\n", __func__);
+ b2r2_log_info("%s: Job not queued\n", __func__);
return -ENOENT;
}
@@ -640,7 +635,7 @@ int b2r2_core_job_wait(struct b2r2_core_job *job)
is_job_done(job));
if (ret)
- dev_warn(b2r2_core.log_dev,
+ b2r2_log_warn(
"%s: wait_event_interruptible returns %d, state is %d",
__func__, ret, job->job_state);
return ret;
@@ -712,12 +707,12 @@ int b2r2_core_job_cancel(struct b2r2_core_job *job)
unsigned long flags;
int ret = 0;
- dev_dbg(b2r2_core.log_dev, "%s (%p) (%d)\n", __func__,
+ b2r2_log_info("%s (%p) (%d)\n", __func__,
job, job->job_state);
/* Check that we have the job */
if (job->job_state == B2R2_CORE_JOB_IDLE) {
/* Never or not queued */
- dev_info(b2r2_core.log_dev, "%s: Job not queued\n", __func__);
+ b2r2_log_info("%s: Job not queued\n", __func__);
return -ENOENT;
}
@@ -731,24 +726,22 @@ int b2r2_core_job_cancel(struct b2r2_core_job *job)
/* LOCAL FUNCTIONS BELOW */
-
static void turn_on_clock(void)
{
- if (!b2r2_core.b2r2_clock_on) {
- clk_enable(b2r2_core.b2r2_clock);
- b2r2_core.b2r2_clock_on = true;
- }
+ if (!b2r2_core.b2r2_clock_on) {
+ clk_enable(b2r2_core.b2r2_clock);
+ b2r2_core.b2r2_clock_on = true;
+ }
}
static void turn_off_clock(void)
{
- if (b2r2_core.b2r2_clock_on) {
- clk_disable(b2r2_core.b2r2_clock);
- b2r2_core.b2r2_clock_on = false;
- }
+ if (b2r2_core.b2r2_clock_on) {
+ clk_disable(b2r2_core.b2r2_clock);
+ b2r2_core.b2r2_clock_on = false;
+ }
}
-
/**
* clock_off_timer_function() - Checks if the B2R2 clock should be turned off
*
@@ -762,7 +755,7 @@ static void clock_off_timer_function(unsigned long arg)
unsigned long j = jiffies;
#ifndef B2R2_CLOCK_ALWAYS_ON
if (time_after_eq(j, b2r2_core.clock_off_timer.expires)) {
- turn_off_clock();
+ turn_off_clock();
dev_dbg(b2r2_core.log_dev,
"B2R2 disable clock: disable b2r2 = %X\n",
(int)b2r2_core.b2r2_clock);
@@ -787,9 +780,8 @@ static void clock_enable(void)
{
b2r2_core.clock_request_count++;
if (b2r2_core.clock_request_count == 1) {
- turn_on_clock();
- dev_dbg(b2r2_core.log_dev,
- "B2R2 enable clock: enable b2r2 = %X\n",
+ turn_on_clock();
+ b2r2_log_info("core: enable clock: enable b2r2 = %X\n",
(int)b2r2_core.b2r2_clock);
}
}
@@ -808,12 +800,12 @@ static void clock_disable(void)
BUG_ON(b2r2_core.clock_request_count == 0);
b2r2_core.clock_request_count--;
- if (b2r2_core.clock_request_count == 0) {
- /* Delayed clock turn off
- (10 ms but at least 2 jiffie increments) */
- mod_timer(&b2r2_core.clock_off_timer,
+ if (b2r2_core.clock_request_count == 0) {
+ /* Delayed clock turn off
+ (10 ms but at least 2 jiffie increments) */
+ mod_timer(&b2r2_core.clock_off_timer,
jiffies + min(HZ / 100, 2));
- }
+ }
}
/**
@@ -958,7 +950,7 @@ static void timeout_work_function(struct work_struct *ptr)
struct b2r2_core_job,
list);
- dev_warn(b2r2_core.log_dev, "%s: Job timeout\n", __func__);
+ b2r2_log_warn("%s: Job timeout\n", __func__);
list_del_init(&job->list);
@@ -1047,8 +1039,7 @@ static void stop_hw_timer(struct b2r2_core_job *job)
/* Check if we have delayed the start of higher prio jobs. Can happen as queue
switching only can be done between nodes. */
- for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--)
- {
+ for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--) {
struct b2r2_core_job *queue_active_job = b2r2_core.active_jobs[i];
if (NULL == queue_active_job)
continue;
@@ -1057,8 +1048,7 @@ static void stop_hw_timer(struct b2r2_core_job *job)
}
/* Check if the job has stolen time from lower prio jobs */
- for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++)
- {
+ for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++) {
struct b2r2_core_job *queue_active_job = b2r2_core.active_jobs[i];
u32 queue_active_job_hw_start_time;
@@ -1067,8 +1057,7 @@ static void stop_hw_timer(struct b2r2_core_job *job)
queue_active_job_hw_start_time = queue_active_job->hw_start_time + time_pos_offset;
- if (queue_active_job_hw_start_time < stop_time)
- {
+ if (queue_active_job_hw_start_time < stop_time) {
u32 queue_active_job_nsec_in_hw = stop_time - queue_active_job_hw_start_time;
u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw, nsec_in_hw);
@@ -1261,7 +1250,7 @@ static void check_prio_list(bool atomic)
/* No resources */
if (!atomic &&
b2r2_core.n_active_jobs == 0) {
- dev_warn(b2r2_core.log_dev,
+ b2r2_log_warn(
"%s: No resource",
__func__);
cancel_job(job);
@@ -1395,7 +1384,6 @@ static struct b2r2_core_job *find_tag_in_active_jobs(int tag)
static int hw_reset(void)
{
u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
-
clock_enable();
/* Tell B2R2 to reset */
@@ -1403,7 +1391,7 @@ static int hw_reset(void)
&b2r2_core.hw->BLT_CTL);
writel(0x00000000, &b2r2_core.hw->BLT_CTL);
- dev_dbg(b2r2_core.log_dev, "wait for B2R2 to be idle..\n");
+ b2r2_log_info("wait for B2R2 to be idle..\n");
/** Wait for B2R2 to be idle (on a timeout rather than while loop) */
while ((uTimeOut > 0) &&
@@ -1414,7 +1402,7 @@ static int hw_reset(void)
clock_disable();
if (uTimeOut == 0) {
- dev_warn(b2r2_core.log_dev,
+ b2r2_log_warn(
"error-> after software reset B2R2 is not idle\n");
return -EAGAIN;
}
@@ -1434,13 +1422,13 @@ static int hw_reset(void)
static void trigger_job(struct b2r2_core_job *job)
{
/* Debug prints */
- dev_dbg(b2r2_core.log_dev, "queue 0x%x \n", job->queue);
- dev_dbg(b2r2_core.log_dev, "BLT TRIG_IP 0x%x (first node)\n",
+ b2r2_log_info("queue 0x%x \n", job->queue);
+ b2r2_log_info("BLT TRIG_IP 0x%x (first node)\n",
job->first_node_address);
- dev_dbg(b2r2_core.log_dev, "BLT LNA_CTL 0x%x (last node)\n",
+ b2r2_log_info("BLT LNA_CTL 0x%x (last node)\n",
job->last_node_address);
- dev_dbg(b2r2_core.log_dev, "BLT TRIG_CTL 0x%x \n", job->control);
- dev_dbg(b2r2_core.log_dev, "BLT PACE_CTL 0x%x \n", job->pace_control);
+ b2r2_log_info("BLT TRIG_CTL 0x%x \n", job->control);
+ b2r2_log_info("BLT PACE_CTL 0x%x \n", job->pace_control);
reset_hw_timer(job);
job->job_state = B2R2_CORE_JOB_RUNNING;
@@ -2121,7 +2109,8 @@ static const struct file_operations debugfs_b2r2_stat_fops = {
static int debugfs_b2r2_clock_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
- char Buf[10+2];//10 characters hex number + newline + string terminator;
+ /* 10 characters hex number + newline + string terminator; */
+ char Buf[10+2];
size_t dev_size;
int ret = 0;
@@ -2218,10 +2207,10 @@ static int init_hw(struct platform_device *pdev)
struct resource *res;
u32 uTimeOut = B2R2_DRIVER_TIMEOUT_VALUE;
- dev_dbg(b2r2_core.log_dev, "%s started..\n", __func__);
- dev_dbg(b2r2_core.log_dev, "Map B2R2 registers...\n");
+ b2r2_log_info("%s started..\n", __func__);
+ b2r2_log_info("Map B2R2 registers...\n");
- (void)prcmu_set_hwacc(HW_ACC_B2R2, HW_ON);
+ (void)prcmu_set_hwacc(HW_ACC_B2R2, HW_ON);
/* Map B2R2 into kernel virtual memory space */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2232,7 +2221,7 @@ static int init_hw(struct platform_device *pdev)
res->end - res->start + 1);
if (b2r2_core.hw == NULL) {
- dev_dbg(b2r2_core.log_dev, "%s: ioremap failed\n", __func__);
+ b2r2_log_info("%s: ioremap failed\n", __func__);
result = -ENOMEM;
goto b2r2_init_ioremap_failed;
}
@@ -2249,7 +2238,7 @@ static int init_hw(struct platform_device *pdev)
writel(readl(&b2r2_core.hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset,
&b2r2_core.hw->BLT_CTL);
- dev_dbg(b2r2_core.log_dev, "register interrupt handler..\n");
+ b2r2_log_info("register interrupt handler..\n");
/* Set up interrupt handler */
result = request_irq(IRQ_B2R2, b2r2_irq_handler, 0,
@@ -2261,12 +2250,12 @@ static int init_hw(struct platform_device *pdev)
goto b2r2_init_request_irq_failed;
}
- dev_dbg(b2r2_core.log_dev, "do a global reset..\n");
+ b2r2_log_info("do a global reset..\n");
/* Release reset */
writel(0x00000000, &b2r2_core.hw->BLT_CTL);
- dev_dbg(b2r2_core.log_dev, "wait for B2R2 to be idle..\n");
+ b2r2_log_info("wait for B2R2 to be idle..\n");
/** Wait for B2R2 to be idle (on a timeout rather than while loop) */
while ((uTimeOut > 0) &&
@@ -2274,7 +2263,7 @@ static int init_hw(struct platform_device *pdev)
B2R2BLT_STA1BDISP_IDLE) == 0x0))
uTimeOut--;
if (uTimeOut == 0) {
- dev_warn(b2r2_core.log_dev,
+ b2r2_log_warn(
"%s: B2R2 not idle after SW reset\n", __func__);
result = -EAGAIN;
goto b2r2_core_init_hw_timeout;
@@ -2308,7 +2297,7 @@ static int init_hw(struct platform_device *pdev)
#endif
clock_disable();
- dev_dbg(b2r2_core.log_dev, "%s ended..\n", __func__);
+ b2r2_log_info("%s ended..\n", __func__);
return result;
/** Recover from any error without any leaks */
@@ -2344,7 +2333,7 @@ static void exit_hw(void)
{
unsigned long flags;
- dev_dbg(b2r2_core.log_dev, "%s started..\n", __func__);
+ b2r2_log_info("%s started..\n", __func__);
#ifdef CONFIG_DEBUG_FS
/* Unregister our debugfs entries */
@@ -2367,11 +2356,11 @@ static void exit_hw(void)
clear_interrupts();
/** Free B2R2 interrupt handler */
- dev_dbg(b2r2_core.log_dev, "free interrupt handler..\n");
+ b2r2_log_info("free interrupt handler..\n");
free_irq(IRQ_B2R2, 0);
/** Unmap B2R2 registers */
- dev_dbg(b2r2_core.log_dev, "unmap b2r2 registers..\n");
+ b2r2_log_info("unmap b2r2 registers..\n");
if (b2r2_core.hw) {
iounmap(b2r2_core.hw);
@@ -2382,9 +2371,9 @@ static void exit_hw(void)
spin_unlock_irqrestore(&b2r2_core.lock, flags);
- (void)prcmu_set_hwacc(HW_ACC_B2R2, HW_OFF);
+ (void)prcmu_set_hwacc(HW_ACC_B2R2, HW_OFF);
- dev_dbg(b2r2_core.log_dev, "%s ended...\n", __func__);
+ b2r2_log_info("%s ended...\n", __func__);
}
/**
@@ -2399,8 +2388,14 @@ static int __init b2r2_probe(struct platform_device *pdev)
BUG_ON(pdev == NULL);
+ ret = b2r2_debug_init(&pdev->dev);
+ if (ret < 0) {
+ dev_err(b2r2_core.log_dev, "b2r2_debug_init failed\n");
+ goto b2r2_probe_debug_init_failed;
+ }
+
b2r2_core.log_dev = &pdev->dev;
- dev_dbg(b2r2_core.log_dev, "init started.\n");
+ b2r2_log_info("init started.\n");
/* Init spin locks */
spin_lock_init(&b2r2_core.lock);
@@ -2429,7 +2424,7 @@ static int __init b2r2_probe(struct platform_device *pdev)
b2r2_core.b2r2_clock = clk_get(&pdev->dev, "b2r2");
if (IS_ERR(b2r2_core.b2r2_clock)) {
ret = PTR_ERR(b2r2_core.b2r2_clock);
- printk(KERN_ERR "B2R2: clk_get b2r2 failed\n");
+ b2r2_log_err("clk_get b2r2 failed\n");
goto b2r2_probe_no_clk;
}
@@ -2443,8 +2438,7 @@ static int __init b2r2_probe(struct platform_device *pdev)
/** Init B2R2 hardware */
ret = init_hw(pdev);
if (ret < 0) {
- dev_dbg(b2r2_core.log_dev, "%s: init_hw() returns %d\n",
- __func__, ret);
+ b2r2_log_err("init_hw() returns %d\n", ret);
goto b2r2_probe_init_hw_fail;
}
@@ -2452,11 +2446,11 @@ static int __init b2r2_probe(struct platform_device *pdev)
or perhaps a dedicated module init c file? */
ret = b2r2_blt_module_init();
if (ret < 0) {
- dev_err(b2r2_core.log_dev, "b2r2_blt_module_init failed\n");
+ b2r2_log_err("b2r2_blt_module_init failed\n");
goto b2r2_probe_blt_init_fail;
}
- dev_dbg(b2r2_core.log_dev, "init done.\n");
+ b2r2_log_info("init done.\n");
return ret;
@@ -2471,7 +2465,8 @@ b2r2_probe_no_clk:
b2r2_core.work_queue = NULL;
b2r2_probe_no_work_queue:
- dev_dbg(b2r2_core.log_dev, "init done with errors.\n");
+ b2r2_log_info("init done with errors.\n");
+b2r2_probe_debug_init_failed:
return ret;
@@ -2519,10 +2514,12 @@ static int b2r2_remove(struct platform_device *pdev)
while (timer_pending(&b2r2_core.clock_off_timer))
mdelay(10);
- dev_dbg(b2r2_core.log_dev, "%s ended\n", __func__);
+ b2r2_log_info("%s ended\n", __func__);
b2r2_core.log_dev = NULL;
+ b2r2_debug_exit();
+
return 0;
}
@@ -2535,7 +2532,7 @@ static int b2r2_remove(struct platform_device *pdev)
*/
int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
{
- printk(KERN_INFO "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
/* Flush B2R2 work queue (call all callbacks) */
flush_workqueue(b2r2_core.work_queue);
@@ -2568,9 +2565,9 @@ int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
*/
int b2r2_resume(struct platform_device *pdev)
{
- printk(KERN_INFO "%s\n", __func__);
+ b2r2_log_info("%s\n", __func__);
- return init_hw(pdev);
+ return init_hw(pdev);
}
/**
diff --git a/drivers/video/b2r2/b2r2_debug.c b/drivers/video/b2r2/b2r2_debug.c
new file mode 100644
index 00000000000..76bd92210a7
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public Licence (GPL) version 2.
+ */
+
+#include "b2r2_debug.h"
+#include <linux/debugfs.h>
+
+int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+struct device *b2r2_log_dev;
+
+static struct dentry *root_dir;
+static struct dentry *log_lvl_dir;
+
+int b2r2_debug_init(struct device *log_dev)
+{
+ int i;
+ int init_val = 0;
+
+ b2r2_log_dev = log_dev;
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+ /*
+ * We want all prints to be enabled by default when using dynamic
+ * debug
+ */
+ init_val = 1;
+#endif
+
+ for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++)
+ b2r2_log_levels[i] = init_val;
+
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ /*
+ * If dynamic debug is disabled we need some other way to control the
+ * log prints
+ */
+ root_dir = debugfs_create_dir("b2r2_debug", NULL);
+ log_lvl_dir = debugfs_create_dir("logs", root_dir);
+
+ /* No need to save the files, they will be removed recursively */
+ (void)debugfs_create_bool("warnings", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]);
+ (void)debugfs_create_bool("info", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]);
+ (void)debugfs_create_bool("debug", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]);
+ (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir,
+ &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]);
+#endif
+
+ return 0;
+}
+
+void b2r2_debug_exit(void)
+{
+#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS)
+ if (root_dir)
+ debugfs_remove_recursive(root_dir);
+#endif
+}
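For context (not part of the patch): when CONFIG_DYNAMIC_DEBUG is not set but CONFIG_DEBUG_FS is, the code above creates one boolean file per log level. A sketch of the resulting layout, assuming the conventional /sys/kernel/debug mount point (the "b2r2_debug", "logs" and file names come from the code, the mount point does not):

/sys/kernel/debug/b2r2_debug/logs/warnings
/sys/kernel/debug/b2r2_debug/logs/info
/sys/kernel/debug/b2r2_debug/logs/debug
/sys/kernel/debug/b2r2_debug/logs/regdumps

Writing 0 or 1 to one of these files updates the corresponding entry in b2r2_log_levels[], which the macros in b2r2_debug.h test before printing.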
diff --git a/drivers/video/b2r2/b2r2_debug.h b/drivers/video/b2r2/b2r2_debug.h
new file mode 100644
index 00000000000..e616e3ff473
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public Licence (GPL) version 2.
+ */
+
+#ifndef _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+#define _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_
+
+#include <linux/device.h>
+
+#ifdef CONFIG_B2R2_DEBUG
+
+/* Log macros */
+enum b2r2_log_levels {
+ B2R2_LOG_LEVEL_WARN,
+ B2R2_LOG_LEVEL_INFO,
+ B2R2_LOG_LEVEL_DEBUG,
+ B2R2_LOG_LEVEL_REGDUMP,
+ B2R2_LOG_LEVEL_COUNT,
+};
+
+/*
+ * Booleans controlling the different log levels. The different log levels are
+ * enabled separately (i.e. you can have info prints without the warn prints).
+ */
+extern int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+
+extern struct device *b2r2_log_dev;
+
+#define b2r2_log_err(...) \
+ do { \
+ dev_err(b2r2_log_dev, __VA_ARGS__); \
+ } while (0)
+
+#define b2r2_log_warn(...) \
+ do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_WARN]) \
+ dev_dbg(b2r2_log_dev, "WARN " __VA_ARGS__); \
+ } while (0)
+
+#define b2r2_log_info(...) \
+ do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_INFO]) \
+ dev_dbg(b2r2_log_dev, "INFO " __VA_ARGS__); \
+ } while (0)
+
+#define b2r2_log_debug(...) \
+ do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]) \
+ dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \
+ } while (0)
+
+#define b2r2_log_regdump(...) \
+ do { \
+ if (b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]) \
+ dev_dbg(b2r2_log_dev, "REGD " __VA_ARGS__); \
+ } while (0)
+
+int b2r2_debug_init(struct device *log_dev);
+void b2r2_debug_exit(void);
+
+#else
+
+#define b2r2_log_err(...)
+#define b2r2_log_warn(...)
+#define b2r2_log_info(...)
+#define b2r2_log_debug(...)
+#define b2r2_log_regdump(...)
+
+static inline int b2r2_debug_init(struct device *log_dev)
+{
+ return 0;
+}
+static inline void b2r2_debug_exit(void)
+{
+ return;
+}
+
+#endif
+
+#endif
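A minimal sketch of a call site using these macros (the function name and job_id parameter are hypothetical, not part of the patch). Each level is gated independently by its b2r2_log_levels[] entry (or by dynamic debug), while b2r2_log_err always prints through dev_err:

/* Hypothetical caller, for illustration only */
#include <linux/errno.h>
#include "b2r2_debug.h"

static int b2r2_example_submit(int job_id)
{
        b2r2_log_info("%s: submitting job %d\n", __func__, job_id);

        if (job_id < 0) {
                b2r2_log_err("%s: invalid job id %d\n", __func__, job_id);
                return -EINVAL;
        }

        b2r2_log_debug("%s: job %d queued\n", __func__, job_id);
        return 0;
}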
diff --git a/drivers/video/b2r2/b2r2_generic.c b/drivers/video/b2r2/b2r2_generic.c
index 934ab4523a8..928b33a417f 100644
--- a/drivers/video/b2r2/b2r2_generic.c
+++ b/drivers/video/b2r2/b2r2_generic.c
@@ -3,23 +3,16 @@
#include "b2r2_internal.h"
#include "b2r2_global.h"
+#include "b2r2_debug.h"
/******************
* Debug printing
******************/
-static u32 debug = 0;
-static u32 debug_areas = 0;
-static u32 verbose = 0;
-static u32 errors = 1;
+static bool debug_areas;
+static bool verbose;
#define B2R2_GENERIC_DEBUG
-#define pdebug(...) \
- do { \
- if (debug) \
- printk(KERN_INFO __VA_ARGS__); \
- } while (false)
-
#define pdebug_areas(...) \
do { \
if (debug_areas) \
@@ -32,12 +25,6 @@ static u32 errors = 1;
printk(KERN_INFO __VA_ARGS__); \
} while (false)
-#define err_msg(...) \
- do { \
- if (errors) \
- printk(KERN_INFO __VA_ARGS__); \
- } while (false)
-
#define PTRACE_ENTRY() pdebug(LOG_TAG "::%s\n", __func__)
#define LOG_TAG "b2r2_generic"
@@ -55,7 +42,7 @@ static u32 errors = 1;
*/
static void reset_nodes(struct b2r2_node *node)
{
- pdebug("%s ENTRY\n", __func__);
+ b2r2_log_info("%s ENTRY\n", __func__);
while (node != NULL) {
memset(&(node->node), 0, sizeof(node->node));
@@ -68,7 +55,7 @@ static void reset_nodes(struct b2r2_node *node)
node = node->next;
}
- pdebug("%s DONE\n", __func__);
+ b2r2_log_info("%s DONE\n", __func__);
}
/**
@@ -79,7 +66,7 @@ static void dump_nodes(struct b2r2_node *first)
struct b2r2_node *node = first;
pverbose("%s ENTRY\n", __func__);
while (node != NULL) {
- pverbose( "\nNODE START:\n=============\n");
+ pverbose("\nNODE START:\n=============\n");
pverbose("B2R2_ACK: \t0x%.8x\n",
node->node.GROUP0.B2R2_ACK);
pverbose("B2R2_INS: \t0x%.8x\n",
@@ -300,7 +287,7 @@ static unsigned int get_pitch(enum b2r2_blt_fmt format, u32 width)
return width;
break;
default:
- err_msg("%s: Unable to determine pitch "
+ b2r2_log_warn("%s: Unable to determine pitch "
"for fmt=%#010x width=%d\n", __func__,
format, width);
return 0;
@@ -313,7 +300,7 @@ static s32 validate_buf(const struct b2r2_blt_img *image)
u32 pitch;
if (image->width <= 0 || image->height <= 0) {
- err_msg("%s Error: width=%d or height=%d negative.\n", __func__,
+ b2r2_log_warn("%s: width=%d or height=%d negative.\n", __func__,
image->width, image->height);
return -EINVAL;
}
@@ -359,14 +346,14 @@ static s32 validate_buf(const struct b2r2_blt_img *image)
}
if (image->buf.len < expect_buf_size) {
- err_msg("%s Error: Invalid buffer size:"
+ b2r2_log_warn("%s: Invalid buffer size:"
"\nfmt=%#010x buf.len=%d expect_buf_size=%d\n", __func__,
image->fmt, image->buf.len, expect_buf_size);
return -EINVAL;
}
if (image->buf.type == B2R2_BLT_PTR_VIRTUAL) {
- err_msg("%s Error: Virtual pointers not supported yet.\n",
+ b2r2_log_warn("%s: Virtual pointers not supported yet.\n",
__func__);
return -EINVAL;
}
@@ -399,7 +386,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
bool yuv_planar = src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR;
- pdebug("%s ENTRY\n", __func__);
+ b2r2_log_info("%s ENTRY\n", __func__);
if ((req->user_req.flags &
(B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0) {
@@ -407,7 +394,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
u32 src_color = req->user_req.src_color;
/* Determine format in src_color */
- switch(dst_img->fmt) {
+ switch (dst_img->fmt) {
/* ARGB formats */
case B2R2_BLT_FMT_16_BIT_ARGB4444:
case B2R2_BLT_FMT_16_BIT_ARGB1555:
@@ -486,7 +473,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
- pdebug("%s DONE\n", __func__);
+ b2r2_log_info("%s DONE\n", __func__);
return;
}
@@ -497,7 +484,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
src_pitch = src_img->pitch;
}
- pdebug("%s transform=%#010x\n", __func__, req->user_req.transform);
+ b2r2_log_info("%s transform=%#010x\n", __func__, req->user_req.transform);
if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
h_scf = (src_rect->width << 10) / dst_rect->height;
v_scf = (src_rect->height << 10) / dst_rect->width;
@@ -508,7 +495,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
/* Configure horizontal rescale */
if (h_scf != (1 << 10)) {
- pdebug("%s: Scaling horizontally by 0x%.8x"
+ b2r2_log_info("%s: Scaling horizontally by 0x%.8x"
"\ns(%d, %d)->d(%d, %d)\n", __func__,
h_scf, src_rect->width, src_rect->height,
dst_rect->width, dst_rect->height);
@@ -520,7 +507,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
/* Configure vertical rescale */
if (v_scf != (1 << 10)) {
- pdebug("%s: Scaling vertically by 0x%.8x"
+ b2r2_log_info("%s: Scaling vertically by 0x%.8x"
"\ns(%d, %d)->d(%d, %d)\n", __func__,
v_scf, src_rect->width, src_rect->height,
dst_rect->width, dst_rect->height);
@@ -726,7 +713,7 @@ static void setup_input_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
- pdebug("%s DONE\n", __func__);
+ b2r2_log_info("%s DONE\n", __func__);
}
static void setup_transform_stage(const struct b2r2_blt_request *req,
@@ -738,7 +725,7 @@ static void setup_transform_stage(const struct b2r2_blt_request *req,
enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
enum b2r2_blt_transform transform = req->user_req.transform;
- pdebug("%s ENTRY\n", __func__);
+ b2r2_log_info("%s ENTRY\n", __func__);
if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
/* Scan order must be flipped otherwise contents will
@@ -770,7 +757,7 @@ static void setup_transform_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
- pdebug("%s DONE\n", __func__);
+ b2r2_log_info("%s DONE\n", __func__);
}
/*
@@ -793,10 +780,10 @@ static void setup_dst_read_stage(const struct b2r2_blt_request *req,
dst_pitch = dst_img->pitch;
}
- pdebug("%s ENTRY\n", __func__);
+ b2r2_log_info("%s ENTRY\n", __func__);
if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
- pdebug("%s ABGR on dst_read\n", __func__);
+ b2r2_log_info("%s ABGR on dst_read\n", __func__);
/* Set up IVMX */
node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED;
@@ -829,7 +816,7 @@ static void setup_dst_read_stage(const struct b2r2_blt_request *req,
node->node.GROUP0.B2R2_INS |=
B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
- pdebug("%s DONE\n", __func__);
+ b2r2_log_info("%s DONE\n", __func__);
}
static void setup_blend_stage(const struct b2r2_blt_request *req,
@@ -838,7 +825,7 @@ static void setup_blend_stage(const struct b2r2_blt_request *req,
struct b2r2_work_buf *fg_buf)
{
u32 global_alpha = req->user_req.global_alpha;
- pdebug("%s ENTRY\n", __func__);
+ b2r2_log_info("%s ENTRY\n", __func__);
node->node.GROUP0.B2R2_ACK = 0;
@@ -927,7 +914,7 @@ static void setup_blend_stage(const struct b2r2_blt_request *req,
B2R2_TY_VSO_TOP_TO_BOTTOM;
}
- pdebug("%s DONE\n", __func__);
+ b2r2_log_info("%s DONE\n", __func__);
}
static void setup_writeback_stage(const struct b2r2_blt_request *req,
@@ -943,10 +930,10 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
dst_pitch = dst_img->pitch;
}
- pdebug("%s ENTRY\n", __func__);
+ b2r2_log_info("%s ENTRY\n", __func__);
if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
- pdebug("%s ABGR on writeback\n", __func__);
+ b2r2_log_info("%s ABGR on writeback\n", __func__);
/* Set up OVMX */
node->node.GROUP0.B2R2_INS |= B2R2_INS_OVMX_ENABLED;
@@ -980,7 +967,7 @@ static void setup_writeback_stage(const struct b2r2_blt_request *req,
B2R2_TY_HSO_LEFT_TO_RIGHT |
B2R2_TY_VSO_TOP_TO_BOTTOM;
- pdebug("%s DONE\n", __func__);
+ b2r2_log_info("%s DONE\n", __func__);
}
/*******************
@@ -1008,7 +995,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if (req == NULL || work_buf_width == NULL || work_buf_height == NULL ||
work_buf_count == NULL || node_count == NULL) {
- err_msg("%s Error: Invalid in or out pointers:\n"
+ b2r2_log_warn("%s: Invalid in or out pointers:\n"
"req=0x%p\n"
"work_buf_width=0x%p work_buf_height=0x%p work_buf_count=0x%p\n"
"node_count=0x%p.\n",
@@ -1033,7 +1020,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) != 0 &&
(req->user_req.flags & B2R2_BLT_FLAG_DEST_COLOR_KEY) != 0) {
- printk(KERN_INFO "%s Error: Invalid combination: source and "
+ b2r2_log_warn("%s: Invalid combination: source and "
"destination color keying.\n", __func__);
return -EINVAL;
}
@@ -1044,7 +1031,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
(req->user_req.flags &
(B2R2_BLT_FLAG_SOURCE_COLOR_KEY |
B2R2_BLT_FLAG_DEST_COLOR_KEY))) {
- printk(KERN_INFO "%s Error: Invalid combination: source_fill and color keying.\n",
+ b2r2_log_warn("%s: Invalid combination: source_fill and color keying.\n",
__func__);
return -EINVAL;
}
@@ -1055,7 +1042,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
(req->user_req.flags &
(B2R2_BLT_FLAG_DEST_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
- printk(KERN_INFO "%s Error: Invalid combination: blending and color keying.\n",
+ b2r2_log_warn("%s: Invalid combination: blending and color keying.\n",
__func__);
return -EINVAL;
}
@@ -1064,7 +1051,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
(req->user_req.flags &
(B2R2_BLT_FLAG_DEST_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) {
- printk(KERN_INFO "%s Error: Invalid combination: source mask and color keying.\n",
+ b2r2_log_warn("%s: Invalid combination: source mask and color keying.\n",
__func__);
return -EINVAL;
}
@@ -1073,7 +1060,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
(B2R2_BLT_FLAG_DEST_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_COLOR_KEY |
B2R2_BLT_FLAG_SOURCE_MASK)) {
- printk(KERN_INFO "%s Error: Unsupported: source mask, color keying.\n", __func__);
+ b2r2_log_warn("%s: Unsupported: source mask, color keying.\n", __func__);
return -ENOSYS;
}
@@ -1084,7 +1071,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
if (!is_src_fill && (src_rect.x < 0 || src_rect.y < 0 ||
src_rect.x + src_rect.width > req->user_req.src_img.width ||
src_rect.y + src_rect.height > req->user_req.src_img.height)) {
- printk(KERN_INFO "%s Error: src_rect outside src_img:\n"
+ b2r2_log_warn("%s: src_rect outside src_img:\n"
"src(x,y,w,h)=(%d, %d, %d, %d) src_img(w,h)=(%d, %d).\n",
__func__,
src_rect.x, src_rect.y, src_rect.width, src_rect.height,
@@ -1093,7 +1080,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
}
if (!is_src_fill && (src_rect.width <= 0 || src_rect.height <= 0)) {
- printk(KERN_INFO "%s Error: Invalid source dimensions:\n"
+ b2r2_log_warn("%s: Invalid source dimensions:\n"
"src(w,h)=(%d, %d).\n",
__func__,
src_rect.width, src_rect.height);
@@ -1101,7 +1088,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
}
if (dst_rect.width <= 0 || dst_rect.height <= 0) {
- printk(KERN_INFO "%s Error: Invalid dest dimensions:\n"
+ b2r2_log_warn("%s: Invalid dest dimensions:\n"
"dst(w,h)=(%d, %d).\n",
__func__,
dst_rect.width, dst_rect.height);
@@ -1123,7 +1110,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
*work_buf_count = n_work_bufs;
*node_count = n_nodes;
/* EMACSOC TODO: Account for multi-buffer format during writeback stage */
- pdebug("%s DONE buf_w=%d buf_h=%d buf_count=%d node_count=%d\n",
+ b2r2_log_info("%s DONE buf_w=%d buf_h=%d buf_count=%d node_count=%d\n",
__func__,
*work_buf_width, *work_buf_height, *work_buf_count, *node_count);
return 0;
@@ -1145,7 +1132,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
* Check for degenerate/out_of_range scaling factors.
*/
if (h_scf <= 0 || v_scf <= 0 || h_scf > 0x7C00 || v_scf > 0x7C00) {
- err_msg("%s Error: Dimensions result in degenerate or "
+ b2r2_log_warn("%s: Dimensions result in degenerate or "
"out of range scaling:\n"
"src(w,h)=(%d, %d) "
"dst(w,h)=(%d,%d).\n"
@@ -1162,7 +1149,7 @@ int b2r2_generic_analyze(const struct b2r2_blt_request *req,
*work_buf_count = n_work_bufs;
*node_count = n_nodes;
/* EMACSOC TODO: Account for multi-buffer format during writeback stage */
- pdebug("%s DONE buf_w=%d buf_h=%d buf_count=%d node_count=%d\n",
+ b2r2_log_info("%s DONE buf_w=%d buf_h=%d buf_count=%d node_count=%d\n",
__func__,
*work_buf_width, *work_buf_height, *work_buf_count, *node_count);
return 0;
@@ -1189,7 +1176,7 @@ int b2r2_generic_configure(const struct b2r2_blt_request *req,
int invalid_req = b2r2_generic_analyze(req, &work_buf_width, &work_buf_height,
&needed_bufs, &needed_nodes);
if (invalid_req < 0) {
- err_msg("%s Error: Invalid request supplied, error=%d\n",
+ b2r2_log_warn("%s: Invalid request supplied, ec=%d\n",
__func__, invalid_req);
return -EINVAL;
} else {
@@ -1200,13 +1187,13 @@ int b2r2_generic_configure(const struct b2r2_blt_request *req,
node = node->next;
}
if (n_nodes < needed_nodes) {
- err_msg("%s Error: Not enough nodes %d < %d.\n",
+ b2r2_log_warn("%s: Not enough nodes %d < %d.\n",
__func__, n_nodes, needed_nodes);
return -EINVAL;
}
if (buf_count < needed_bufs) {
- err_msg("%s Error: Not enough buffers %d < %d.\n",
+ b2r2_log_warn("%s: Not enough buffers %d < %d.\n",
__func__, buf_count, needed_bufs);
return -EINVAL;
}
diff --git a/drivers/video/b2r2/b2r2_internal.h b/drivers/video/b2r2/b2r2_internal.h
index 27081254a2e..a0835fcc1f3 100644
--- a/drivers/video/b2r2/b2r2_internal.h
+++ b/drivers/video/b2r2/b2r2_internal.h
@@ -343,4 +343,14 @@ struct b2r2_node *b2r2_blt_alloc_nodes(int node_count);
*/
void b2r2_blt_free_nodes(struct b2r2_node *first_node);
+/**
+ * b2r2_blt_module_init() - Initialize the B2R2 blt module
+ */
+int b2r2_blt_module_init(void);
+
+/**
+ * b2r2_blt_module_exit() - Un-initialize the B2R2 blt module
+ */
+void b2r2_blt_module_exit(void);
+
#endif
diff --git a/drivers/video/b2r2/b2r2_node_split.c b/drivers/video/b2r2/b2r2_node_split.c
index 7149d0a5edf..eaff94c57b1 100644
--- a/drivers/video/b2r2/b2r2_node_split.c
+++ b/drivers/video/b2r2/b2r2_node_split.c
@@ -21,12 +21,11 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
-#include <asm/dma-mapping.h>
-
#include "b2r2_node_split.h"
#include "b2r2_internal.h"
#include "b2r2_hw.h"
+#include "b2r2_debug.h"
/******************
* Debug printing
@@ -34,10 +33,10 @@
static u32 debug;
static u32 verbose;
-static u32 hf_coeffs_addr = 0;
-static u32 vf_coeffs_addr = 0;
-static void *hf_coeffs = NULL;
-static void *vf_coeffs = NULL;
+static u32 hf_coeffs_addr;
+static u32 vf_coeffs_addr;
+static void *hf_coeffs;
+static void *vf_coeffs;
#define HF_TABLE_SIZE 64
#define VF_TABLE_SIZE 40
@@ -192,8 +191,6 @@ static int constrain_window(struct b2r2_blt_rect *window,
enum b2r2_blt_fmt fmt, u32 max_size);
static int setup_tmp_buf(struct b2r2_node_split_buf *this, u32 max_size,
enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height);
-static int calculate_tile_count(s32 area_width, s32 area_height, s32 tile_width,
- s32 tile_height);
static inline enum b2r2_ty get_alpha_range(enum b2r2_blt_fmt fmt);
static inline u32 set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color);
@@ -230,7 +227,6 @@ static void set_src_3(struct b2r2_node *node, u32 addr,
static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values);
static void reset_nodes(struct b2r2_node *node);
-static void dump_nodes(struct b2r2_node *first);
/********************
* Public functions
@@ -271,7 +267,7 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
&req->user_req.dst_img, &req->user_req.dst_rect, false,
0);
- pdebug(KERN_INFO LOG_TAG "::%s:\n"
+ b2r2_log_info("%s:\n"
"\t\tsrc.rect=(%4d, %4d, %4d, %4d)\t"
"dst.rect=(%4d, %4d, %4d, %4d)\n", __func__, this->src.rect.x,
this->src.rect.y, this->src.rect.width, this->src.rect.height,
@@ -368,8 +364,10 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
}
}
- if (ret < 0) {
- printk(KERN_ERR "%s: Analysis failed!\n", __func__);
+ if (ret == -ENOSYS) {
+ goto unsupported;
+ } else if (ret < 0) {
+ b2r2_log_warn("%s: Analysis failed!\n", __func__);
goto error;
}
@@ -408,7 +406,7 @@ int b2r2_node_split_analyze(const struct b2r2_blt_request *req,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
unsupported:
return ret;
}
@@ -455,7 +453,7 @@ int b2r2_node_split_configure(struct b2r2_node_split_job *this,
if (node == NULL) {
/* DOH! This is an internal error (too few nodes
allocated) */
- printk(KERN_ERR LOG_TAG "%s: "
+ b2r2_log_warn("%s: "
"Internal error! Out of nodes!\n",
__func__);
ret = -ENOMEM;
@@ -541,7 +539,7 @@ int b2r2_node_split_configure(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
{
int nbr_nodes = 0;
@@ -550,9 +548,9 @@ error:
first = first->next;
}
- printk(KERN_ERR LOG_TAG "::%s: Asked for %d nodes, got %d\n",
+ b2r2_log_warn("%s: Asked for %d nodes, got %d\n",
__func__, this->node_count, nbr_nodes);
- printk(KERN_ERR LOG_TAG "::%s: src.rect=(%4d, %4d, %4d, %4d)\t"
+ b2r2_log_warn("%s: src.rect=(%4d, %4d, %4d, %4d)\t"
"dst.rect=(%4d, %4d, %4d, %4d)\n", __func__,
this->src.rect.x, this->src.rect.y,
this->src.rect.width, this->src.rect.height,
@@ -588,23 +586,23 @@ int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
if (node->src_tmp_index) {
u32 addr = bufs[node->src_tmp_index - 1].phys_addr;
- pverbose(KERN_INFO LOG_TAG "::%s: "
- "%p Assigning %p as source", __func__,
+ b2r2_log_info("%s: "
+ "%p Assigning %p as source ", __func__,
node, (void *)addr);
BUG_ON(node->src_tmp_index > buf_count);
switch (node->src_index) {
case 1:
- pverbose("1\n");
+ b2r2_log_info("1\n");
node->node.GROUP3.B2R2_SBA = addr;
break;
case 2:
- pverbose("2\n");
+ b2r2_log_info("2\n");
node->node.GROUP4.B2R2_SBA = addr;
break;
case 3:
- pverbose("3\n");
+ b2r2_log_info("3\n");
node->node.GROUP5.B2R2_SBA = addr;
break;
default:
@@ -613,7 +611,7 @@ int b2r2_node_split_assign_buffers(struct b2r2_node_split_job *this,
}
}
- pverbose(KERN_INFO LOG_TAG "::%s: tba=%p\tsba=%p\n", __func__,
+ b2r2_log_info("%s: tba=%p\tsba=%p\n", __func__,
(void *)node->node.GROUP1.B2R2_TBA,
(void *)node->node.GROUP4.B2R2_SBA);
@@ -658,9 +656,9 @@ static int check_rect(const struct b2r2_blt_img *img,
/* Check rectangle dimensions*/
if ((rect->width <= 0) || (rect->height <= 0)) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Illegal rect (%d, %d, %d, %d)\n",
- __func__, rect->x,rect->y, rect->width, rect->height);
+ __func__, rect->x, rect->y, rect->width, rect->height);
ret = -EINVAL;
goto error;
}
@@ -681,14 +679,14 @@ static int check_rect(const struct b2r2_blt_img *img,
/* Check so that the rect isn't outside the buffer */
if ((l < 0) || (t < 0)) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"rect origin outside buffer\n", __func__);
ret = -EINVAL;
goto error;
}
if ((r > img->width) || (t > img->height)) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"rect ends outside buffer\n", __func__);
ret = -EINVAL;
goto error;
@@ -696,7 +694,7 @@ static int check_rect(const struct b2r2_blt_img *img,
/* Check so the intersected rectangle isn't empty */
if ((l == r) || (t == b)) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"rect is empty (width or height zero)\n",
__func__);
ret = -EINVAL;
@@ -705,7 +703,7 @@ static int check_rect(const struct b2r2_blt_img *img,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -763,7 +761,7 @@ static int analyze_color_fill(struct b2r2_node_split_job *this,
/* Destination must be raster for raw fill to work */
if ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) &&
(this->dst.type != B2R2_FMT_TYPE_RASTER)) {
- printk(KERN_ERR "%s: Raw fill requires raster destination\n",
+ b2r2_log_warn("%s: Raw fill requires raster destination\n",
__func__);
ret = -EINVAL;
goto error;
@@ -807,7 +805,7 @@ static int analyze_color_fill(struct b2r2_node_split_job *this,
this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888;
} else {
/* Wait, what? */
- printk(KERN_ERR "%s: "
+ b2r2_log_warn("%s: "
"Illegal destination format for fill",
__func__);
ret = -EINVAL;
@@ -859,7 +857,7 @@ static int analyze_color_fill(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -959,7 +957,7 @@ static int analyze_transform(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
unsupported:
return ret;
}
@@ -1035,7 +1033,7 @@ static int analyze_copy(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1082,12 +1080,12 @@ static int analyze_rot_scale(struct b2r2_node_split_job *this,
if (ret < 0)
goto error;
- pdebug(KERN_INFO LOG_TAG "::%s: tmp_width=%d\ttmp_height=%d\n",
+ b2r2_log_info("%s: tmp_width=%d\ttmp_height=%d\n",
__func__, tmp_width, tmp_height);
- pdebug(KERN_INFO LOG_TAG "::%s: "
+ b2r2_log_info("%s: "
"tmp.rect.width=%d\ttmp.rect.height=%d\n", __func__,
tmp->rect.width, tmp->rect.height);
- pdebug(KERN_INFO LOG_TAG "::%s: hsf=%d\tvsf=%d\n",
+ b2r2_log_info("%s: hsf=%d\tvsf=%d\n",
__func__, this->horiz_sf, this->vert_sf);
this->work_bufs[0].size = tmp->pitch * tmp->height;
@@ -1122,14 +1120,14 @@ static int analyze_rot_scale(struct b2r2_node_split_job *this,
this->type = B2R2_SCALE_AND_ROTATE;
- pdebug(KERN_INFO LOG_TAG "::%s: Rot scale:\n", __func__);
- pdebug(KERN_INFO LOG_TAG "::%s: node_count=%d\n", __func__,
+ b2r2_log_info("%s: Rot scale:\n", __func__);
+ b2r2_log_info("%s: node_count=%d\n", __func__,
this->node_count);
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1180,13 +1178,13 @@ static int calculate_rot_scale_node_count(struct b2r2_node_split_job *this,
else
dst_bottom_height = 0;
- pdebug(KERN_INFO "dst.window=\t(%4d, %4d, %4d, %4d)\n",
+ b2r2_log_info("dst.window=\t(%4d, %4d, %4d, %4d)\n",
this->dst.window.x, this->dst.window.y, this->dst.window.width,
this->dst.window.height);
- pdebug(KERN_INFO "src.window=\t(%4d, %4d, %4d, %4d)\n",
+ b2r2_log_info("src.window=\t(%4d, %4d, %4d, %4d)\n",
this->src.window.x, this->src.window.y, this->src.window.width,
this->src.window.height);
- pdebug(KERN_INFO "right_width=%d, bottom_height=%d\n", dst_right_width,
+ b2r2_log_info("right_width=%d, bottom_height=%d\n", dst_right_width,
dst_bottom_height);
/* Update the rot_count and scale_count with all the "inner" tiles */
@@ -1195,7 +1193,7 @@ static int calculate_rot_scale_node_count(struct b2r2_node_split_job *this,
rot_count = tile_rots * nbr_full_cols * nbr_full_rows;
scale_count = nbr_full_cols * nbr_full_rows;
- pdebug(KERN_INFO LOG_TAG "::%s: inner=%d\n", __func__,
+ b2r2_log_info("%s: inner=%d\n", __func__,
tile_rots);
/* Update with "right tile" rotations (one tile per row) */
@@ -1204,7 +1202,7 @@ static int calculate_rot_scale_node_count(struct b2r2_node_split_job *this,
rot_count += tile_rots * nbr_full_rows;
scale_count += nbr_full_rows;
- pdebug(KERN_INFO LOG_TAG "::%s: right=%d\n", __func__,
+ b2r2_log_info("%s: right=%d\n", __func__,
tile_rots);
}
@@ -1215,7 +1213,7 @@ static int calculate_rot_scale_node_count(struct b2r2_node_split_job *this,
rot_count += tile_rots * nbr_full_cols;
scale_count += nbr_full_cols;
- pdebug(KERN_INFO LOG_TAG "::%s: bottom=%d\n", __func__,
+ b2r2_log_info("%s: bottom=%d\n", __func__,
tile_rots);
}
@@ -1225,22 +1223,22 @@ static int calculate_rot_scale_node_count(struct b2r2_node_split_job *this,
rot_count += tile_rots;
scale_count++;
- pdebug(KERN_INFO LOG_TAG "::%s: bottom_right=%d\n", __func__,
+ b2r2_log_info("%s: bottom_right=%d\n", __func__,
tile_rots);
}
/* Finally calculate the total node count */
this->node_count = (scale_count + rot_count) * copy_count;
- pdebug(KERN_INFO LOG_TAG "::%s: nbr_full_cols=%d, nbr_full_rows=%d\n",
+ b2r2_log_info("%s: nbr_full_cols=%d, nbr_full_rows=%d\n",
__func__, nbr_full_cols, nbr_full_rows);
- pdebug(KERN_INFO LOG_TAG "::%s: node_count=%d\n", __func__,
+ b2r2_log_info("%s: node_count=%d\n", __func__,
this->node_count);
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1355,7 +1353,7 @@ static int analyze_scaling(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1452,7 +1450,7 @@ static int analyze_rotation(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1496,7 +1494,7 @@ static int analyze_scale_factors(struct b2r2_node_split_job *this)
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1609,7 +1607,7 @@ static int configure_tile(struct b2r2_node_split_job *this,
goto error;
break;
default:
- printk(KERN_ERR "%s: Unsupported request\n", __func__);
+ b2r2_log_warn("%s: Unsupported request\n", __func__);
ret = -ENOSYS;
goto error;
@@ -1622,7 +1620,7 @@ static int configure_tile(struct b2r2_node_split_job *this,
/* Configure blending and clipping */
do {
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Internal error! Out of nodes!\n",
__func__);
ret = -ENOMEM;
@@ -1647,7 +1645,7 @@ static int configure_tile(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1669,7 +1667,7 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
struct b2r2_blt_rect dst_win;
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: Out of nodes!\n",
+ b2r2_log_warn("%s: Out of nodes!\n",
__func__);
ret = -ENOMEM;
goto error;
@@ -1683,8 +1681,8 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
tmp->rect.height = this->dst.window.width;
memcpy(&tmp->window, &tmp->rect, sizeof(tmp->window));
- pdebug(KERN_INFO LOG_TAG "::%s:Rot rescale:\n", __func__);
- pdebug(KERN_INFO LOG_TAG "::%s:\tsrc=(%4d, %4d, %4d, %4d)\t"
+ b2r2_log_info("%s:Rot rescale:\n", __func__);
+ b2r2_log_info("%s:\tsrc=(%4d, %4d, %4d, %4d)\t"
"tmp=(%4d, %4d, %4d, %4d)\n", __func__, this->src.window.x,
this->src.window.y, this->src.window.width,
this->src.window.height, tmp->window.x, tmp->window.y,
@@ -1707,7 +1705,7 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
rot_start = node;
- pdebug(KERN_INFO LOG_TAG "::%s: tmp_rect=(%d, %d, %d, %d)\n", __func__,
+ b2r2_log_info("%s: tmp_rect=(%d, %d, %d, %d)\n", __func__,
tmp->rect.x, tmp->rect.y, tmp->rect.width, tmp->rect.height);
do {
@@ -1715,7 +1713,7 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
last_row = tmp->window.y + tmp->dy >= tmp->rect.height;
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: Out of nodes!\n",
+ b2r2_log_warn("%s: Out of nodes!\n",
__func__);
ret = -ENOMEM;
goto error;
@@ -1731,7 +1729,7 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
this->dst.window.width = tmp->window.height;
}
- pdebug(KERN_INFO LOG_TAG "::%s: \ttmp=(%4d, %4d, %4d, %4d) "
+ b2r2_log_info("%s: \ttmp=(%4d, %4d, %4d, %4d) "
"\tdst=(%4d, %4d, %4d, %4d)\n", __func__, tmp->window.x,
tmp->window.y, tmp->window.width, tmp->window.height,
this->dst.window.x, this->dst.window.y,
@@ -1759,12 +1757,12 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
/* Configure blending and clipping for the rotation nodes */
node = rot_start;
- pdebug(KERN_INFO LOG_TAG "::%s: "
+ b2r2_log_info("%s: "
"Configuring clipping and blending. rot_start=%p, last=%p\n",
__func__, rot_start, last);
do {
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: Out of nodes!\n",
+ b2r2_log_warn("%s: Out of nodes!\n",
__func__);
ret = -ENOMEM;
goto error;
@@ -1786,7 +1784,7 @@ static int configure_rot_scale(struct b2r2_node_split_job *this,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1882,7 +1880,7 @@ static int configure_fill(struct b2r2_node *node, u32 color,
do {
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Internal error! Out of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
@@ -1939,7 +1937,7 @@ static int configure_fill(struct b2r2_node *node, u32 color,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -1980,7 +1978,7 @@ static int configure_copy(struct b2r2_node *node,
/* Configure the source for each node */
do {
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
" Internal error! Out of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
@@ -1999,7 +1997,7 @@ static int configure_copy(struct b2r2_node *node,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -2030,7 +2028,7 @@ static int configure_rotate(struct b2r2_node *node,
do {
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Internal error! Out of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
@@ -2047,7 +2045,7 @@ static int configure_rotate(struct b2r2_node *node,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -2075,7 +2073,7 @@ static int configure_scale(struct b2r2_node *node,
u32 rsf = 0;
u32 rzi = 0;
- pdebug(KERN_INFO LOG_TAG "::%s:\n"
+ b2r2_log_info("%s:\n"
"\t\tsrc=(%4d, %4d, %4d, %4d)\tdst=(%4d, %4d, %4d, %4d)\n",
__func__, src->window.x, src->window.y, src->window.width,
src->window.height, dst->window.x, dst->window.y,
@@ -2103,7 +2101,7 @@ static int configure_scale(struct b2r2_node *node,
do {
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Internal error! Out of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
@@ -2159,7 +2157,7 @@ static int configure_scale(struct b2r2_node *node,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -2300,7 +2298,7 @@ static int configure_dst(struct b2r2_node *node,
for (i = 0; i < nbr_planes; i++) {
if (node == NULL) {
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Internal error! Out of nodes!\n", __func__);
ret = -ENOMEM;
goto error;
@@ -2319,7 +2317,7 @@ static int configure_dst(struct b2r2_node *node,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -2510,7 +2508,7 @@ static int constrain_window(struct b2r2_blt_rect *window,
window->height = MIN(window->height, max_size / pitch);
if (window->height == 0) {
- printk(KERN_ERR LOG_TAG "::%s: Not enough tmp mem\n",
+ b2r2_log_warn("%s: Not enough tmp mem\n",
__func__);
ret = -ENOMEM;
goto error;
@@ -2519,7 +2517,7 @@ static int constrain_window(struct b2r2_blt_rect *window,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -2547,7 +2545,7 @@ static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
fmt = B2R2_BLT_FMT_32_BIT_AYUV8888;
} else {
/* Wait, what? */
- printk(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Cannot create tmp buf from this fmt (%d)\n", __func__,
pref_fmt);
ret = -EINVAL;
@@ -2570,7 +2568,7 @@ static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
/* We should at least have enough room for one scanline */
if (height == 0) {
- printk(KERN_ERR LOG_TAG "::%s: Not enough tmp mem!\n",
+ b2r2_log_warn("%s: Not enough tmp mem!\n",
__func__);
ret = -ENOMEM;
goto error;
@@ -2589,7 +2587,7 @@ static int setup_tmp_buf(struct b2r2_node_split_buf *tmp, u32 max_size,
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
@@ -2964,13 +2962,13 @@ static inline bool is_transform(const struct b2r2_blt_request *req)
static inline int calculate_scale_factor(u32 from, u32 to, u16 *sf_out)
{
int ret;
-
- /* Assume normal nearest neighbor scaling:
-
- sf = (src - min_step) / (dst - 1)
- */
u32 sf;
+ /*
+ * Assume normal nearest neighbor scaling:
+ *
+ * sf = (src - min_step) / (dst - 1)
+ */
if (to > 1)
sf = ((from << 10) - 1) / (to - 1);
else
@@ -2978,12 +2976,12 @@ static inline int calculate_scale_factor(u32 from, u32 to, u16 *sf_out)
if ((sf & 0xffff0000) != 0) {
/* Overflow error */
- pverbose(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Scale factor too large\n", __func__);
ret = -EINVAL;
goto error;
} else if (sf == 0) {
- pverbose(KERN_ERR LOG_TAG "::%s: "
+ b2r2_log_warn("%s: "
"Scale factor too small\n", __func__);
ret = -EINVAL;
goto error;
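For reference, the scale factor computed above is a 6.10 fixed-point value, so 1 << 10 (0x400) means 1:1 and the overflow check rejects anything that does not fit in 16 bits. A short worked example with hypothetical dimensions (not taken from the patch):

/*
 * from = 100, to = 50 (downscale):
 *   sf = ((100 << 10) - 1) / (50 - 1)
 *      = 102399 / 49
 *      = 2089          (0x829, roughly 2.04 in 6.10 fixed point)
 *
 * from = 50, to = 100 (upscale):
 *   sf = ((50 << 10) - 1) / (100 - 1)
 *      = 51199 / 99
 *      = 517           (0x205, roughly 0.50 in 6.10 fixed point)
 */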
@@ -2994,38 +2992,11 @@ static inline int calculate_scale_factor(u32 from, u32 to, u16 *sf_out)
return 0;
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return ret;
}
/**
- * calculate_tile_count() - calculates how many tiles will fit
- */
-static int calculate_tile_count(s32 area_width, s32 area_height, s32 tile_width,
- s32 tile_height)
-{
- int nbr_cols;
- int nbr_rows;
-
- pverbose(KERN_INFO LOG_TAG "::%s: area=(%d, %d) tile=(%d, %d)\n",
- __func__, area_width, area_height, tile_width,
- tile_height);
-
- if (area_width == 0 || area_height == 0 || tile_width == 0 ||
- tile_height == 0)
- return 0;
-
- nbr_cols = area_width / tile_width;
- if (area_width % tile_width)
- nbr_cols++;
- nbr_rows = area_height / tile_height;
- if (area_height % tile_height)
- nbr_rows++;
-
- return nbr_cols * nbr_rows;
-}
-
-/**
* rescale() - rescales the given dimension
*/
static inline s32 rescale(s32 dim, u16 sf)
@@ -3319,15 +3290,12 @@ static void reset_nodes(struct b2r2_node *node)
if (node->next != NULL)
node->node.GROUP0.B2R2_NIP =
node->next->physical_address;
-/*
- else
- node->node.GROUP0.B2R2_INS =
- B2R2_INS_BLITCOMPIRQ_ENABLED;
-*/
node = node->next;
}
}
+/* Commented for now, will be useful later... */
+#if 0
/**
* dump_nodes() - prints the node list
*/
@@ -3415,6 +3383,7 @@ static void dump_nodes(struct b2r2_node *first)
node = node->next;
}
}
+#endif
#ifdef CONFIG_DEBUG_FS
static struct dentry *dir;
@@ -3442,7 +3411,7 @@ error_free_debug:
error_free_dir:
debugfs_remove(dir);
error:
- printk(KERN_ERR LOG_TAG "::%s: ERROR!\n", __func__);
+ b2r2_log_warn("%s: Exit...\n", __func__);
return -1;
}